From 385eeba4c0e5143f9cbc2a4e6d9715ed060c9075 Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Wed, 7 Mar 2018 22:38:52 +0100 Subject: [PATCH 01/13] haproxy: Fix Lua-support for mips(el) - TARGET_CFLAGS were missing for haproxy which caused issue #4606 (https://github.com/openwrt/packages/issues/4606) - All targets finally have Lua support again Signed-off-by: Christian Lachner --- net/haproxy/Makefile | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/net/haproxy/Makefile b/net/haproxy/Makefile index e5d124b97..da0b3c474 100644 --- a/net/haproxy/Makefile +++ b/net/haproxy/Makefile @@ -94,13 +94,6 @@ endef ENABLE_LUA:=y ENABLE_REGPARM:=n -ifeq ($(CONFIG_mips),y) - ENABLE_LUA:=n -endif -ifeq ($(CONFIG_mipsel),y) - ENABLE_LUA:=n -endif - ifeq ($(CONFIG_TARGET_x86),y) ENABLE_REGPARM:=y endif @@ -152,6 +145,7 @@ define Build/Compile USE_ZLIB=yes USE_PCRE=1 USE_PCRE_JIT=1 USE_GETADDRINFO=1 \ VERSION="$(PKG_VERSION)-patch$(PKG_RELEASE)" \ $(ADDON) \ + CFLAGS="$(TARGET_CFLAGS)" \ LD="$(TARGET_CC)" \ LDFLAGS="$(TARGET_LDFLAGS) -latomic" \ IGNOREGIT=1 From 88fdeb5085a2de4448c5e874900a8846dbc5bdaf Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Fri, 16 Mar 2018 11:26:28 +0100 Subject: [PATCH 02/13] haproxy: Update MEDIUM+ patches for HAProxy v1.8.4 - Add new MEDIUM+ patches (see https://www.haproxy.org/bugs/bugs-1.8.4.html) - Raise patch-level to 02 Signed-off-by: Christian Lachner --- net/haproxy/Makefile | 2 +- ...ing-data-after-end-of-output-buffers.patch | 71 +++++++++++++++++++ ...r-Fix-the-wrapping-case-in-bo_putblk.patch | 33 +++++++++ ...r-Fix-the-wrapping-case-in-bi_putblk.patch | 33 +++++++++ ...also-arm-the-h2-timeout-when-sending.patch | 46 ++++++++++++ ...age-with-cpu-map-and-nbthread-nbproc.patch | 55 ++++++++++++++ ...m-idle-list-when-HAProxy-is-stopping.patch | 43 +++++++++++ 7 files changed, 282 insertions(+), 1 deletion(-) create mode 100644 net/haproxy/patches/0005-BUG-MEDIUM-h2-always-consume-any-trailing-data-after-end-of-output-buffers.patch create mode 100644 net/haproxy/patches/0006-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bo_putblk.patch create mode 100644 net/haproxy/patches/0007-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bi_putblk.patch create mode 100644 net/haproxy/patches/0008-BUG-MEDIUM-h2-also-arm-the-h2-timeout-when-sending.patch create mode 100644 net/haproxy/patches/0009-BUG-MEDIUM-fix-a-100-cpu-usage-with-cpu-map-and-nbthread-nbproc.patch create mode 100644 net/haproxy/patches/0010-BUG-MEDIUM-spoe-Remove-idle-applets-from-idle-list-when-HAProxy-is-stopping.patch diff --git a/net/haproxy/Makefile b/net/haproxy/Makefile index da0b3c474..0a37b8f38 100644 --- a/net/haproxy/Makefile +++ b/net/haproxy/Makefile @@ -10,7 +10,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=haproxy PKG_VERSION:=1.8.4 -PKG_RELEASE:=01 +PKG_RELEASE:=02 PKG_SOURCE:=haproxy-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://www.haproxy.org/download/1.8/src/ diff --git a/net/haproxy/patches/0005-BUG-MEDIUM-h2-always-consume-any-trailing-data-after-end-of-output-buffers.patch b/net/haproxy/patches/0005-BUG-MEDIUM-h2-always-consume-any-trailing-data-after-end-of-output-buffers.patch new file mode 100644 index 000000000..2ed041f39 --- /dev/null +++ b/net/haproxy/patches/0005-BUG-MEDIUM-h2-always-consume-any-trailing-data-after-end-of-output-buffers.patch @@ -0,0 +1,71 @@ +From 6fc36785addd45cc76a029a023296def53cff135 Mon Sep 17 00:00:00 2001 +From: Willy Tarreau +Date: Tue, 27 Feb 2018 15:37:25 +0100 +Subject: [PATCH] BUG/MEDIUM: h2: always consume any trailing data after 
end + of output buffers + +In case a stream tries to emit more data than advertised by the chunks +or content-length headers, the extra data remains in the channel's output +buffer until the channel's timeout expires. It can easily happen when +sending malformed error files making use of a wrong content-length or +having extra CRLFs after the empty chunk. It may also be possible to +forge such a bad response using Lua. + +The H1 to H2 encoder must protect itself against this by marking the data +presented to it as consumed if it decides to discard them, so that the +sending stream doesn't wait for the timeout to trigger. + +The visible effect of this problem is a huge memory usage and a high +concurrent connection count during benchmarks when using such bad data +(a typical place where this easily happens). + +This fix must be backported to 1.8. + +(cherry picked from commit 35a62705df65632e2717ae0d20a93e0cb3f8f163) +Signed-off-by: Willy Tarreau +--- + src/mux_h2.c | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) + +diff --git a/src/mux_h2.c b/src/mux_h2.c +index caae041..4303a06 100644 +--- a/src/mux_h2.c ++++ b/src/mux_h2.c +@@ -3020,6 +3020,9 @@ static int h2s_frt_make_resp_headers(struct h2s *h2s, struct buffer *buf) + * body or directly end in TRL2. + */ + if (es_now) { ++ // trim any possibly pending data (eg: inconsistent content-length) ++ bo_del(buf, buf->o); ++ + h1m->state = HTTP_MSG_DONE; + h2s->flags |= H2_SF_ES_SENT; + if (h2s->st == H2_SS_OPEN) +@@ -3269,8 +3272,12 @@ static int h2s_frt_make_resp_data(struct h2s *h2s, struct buffer *buf) + else + h2c_stream_close(h2c, h2s); + +- if (!(h1m->flags & H1_MF_CHNK)) ++ if (!(h1m->flags & H1_MF_CHNK)) { ++ // trim any possibly pending data (eg: inconsistent content-length) ++ bo_del(buf, buf->o); ++ + h1m->state = HTTP_MSG_DONE; ++ } + + h2s->flags |= H2_SF_ES_SENT; + } +@@ -3319,6 +3326,10 @@ static int h2_snd_buf(struct conn_stream *cs, struct buffer *buf, int flags) + } + total += count; + bo_del(buf, count); ++ ++ // trim any possibly pending data (eg: extra CR-LF, ...) ++ bo_del(buf, buf->o); ++ + h2s->res.state = HTTP_MSG_DONE; + break; + } +-- +1.7.10.4 + diff --git a/net/haproxy/patches/0006-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bo_putblk.patch b/net/haproxy/patches/0006-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bo_putblk.patch new file mode 100644 index 000000000..94eec1f6b --- /dev/null +++ b/net/haproxy/patches/0006-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bo_putblk.patch @@ -0,0 +1,33 @@ +From fefb8592821ff0fa56f435c581d6e92e563e7ad7 Mon Sep 17 00:00:00 2001 +From: Christopher Faulet +Date: Mon, 26 Feb 2018 10:47:03 +0100 +Subject: [PATCH] BUG/MEDIUM: buffer: Fix the wrapping case in bo_putblk + +When the block of data need to be split to support the wrapping, the start of +the second block of data was wrong. We must be sure to skip data copied during +the first memcpy. + +This patch must be backported to 1.8, 1.7, 1.6 and 1.5. 
+ +(cherry picked from commit b2b279464c5c0f3dfadf02333e06eb0ae8ae8793) +Signed-off-by: Willy Tarreau +--- + include/common/buffer.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/common/buffer.h b/include/common/buffer.h +index 976085e..ae9aafd 100644 +--- a/include/common/buffer.h ++++ b/include/common/buffer.h +@@ -468,7 +468,7 @@ static inline int bo_putblk(struct buffer *b, const char *blk, int len) + memcpy(b->p, blk, half); + b->p = b_ptr(b, half); + if (len > half) { +- memcpy(b->p, blk, len - half); ++ memcpy(b->p, blk + half, len - half); + b->p = b_ptr(b, half); + } + b->o += len; +-- +1.7.10.4 + diff --git a/net/haproxy/patches/0007-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bi_putblk.patch b/net/haproxy/patches/0007-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bi_putblk.patch new file mode 100644 index 000000000..93ca220a9 --- /dev/null +++ b/net/haproxy/patches/0007-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bi_putblk.patch @@ -0,0 +1,33 @@ +From 14f325000b91649b9d117c4d53d6b194ed3c7b11 Mon Sep 17 00:00:00 2001 +From: Christopher Faulet +Date: Mon, 26 Feb 2018 10:51:28 +0100 +Subject: [PATCH] BUG/MEDIUM: buffer: Fix the wrapping case in bi_putblk + +When the block of data need to be split to support the wrapping, the start of +the second block of data was wrong. We must be sure to skup data copied during +the first memcpy. + +This patch must be backported to 1.8. + +(cherry picked from commit ca6ef506610e9d78f99b7ab2095ce0f8a47e18df) +Signed-off-by: Willy Tarreau +--- + include/common/buffer.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/common/buffer.h b/include/common/buffer.h +index ae9aafd..0e63913 100644 +--- a/include/common/buffer.h ++++ b/include/common/buffer.h +@@ -577,7 +577,7 @@ static inline int bi_putblk(struct buffer *b, const char *blk, int len) + + memcpy(bi_end(b), blk, half); + if (len > half) +- memcpy(b_ptr(b, b->i + half), blk, len - half); ++ memcpy(b_ptr(b, b->i + half), blk + half, len - half); + b->i += len; + return len; + } +-- +1.7.10.4 + diff --git a/net/haproxy/patches/0008-BUG-MEDIUM-h2-also-arm-the-h2-timeout-when-sending.patch b/net/haproxy/patches/0008-BUG-MEDIUM-h2-also-arm-the-h2-timeout-when-sending.patch new file mode 100644 index 000000000..9096eb15a --- /dev/null +++ b/net/haproxy/patches/0008-BUG-MEDIUM-h2-also-arm-the-h2-timeout-when-sending.patch @@ -0,0 +1,46 @@ +From ccfb5d755f1708f890b197375d962d8c938e78bd Mon Sep 17 00:00:00 2001 +From: Willy Tarreau +Date: Mon, 5 Mar 2018 16:10:54 +0100 +Subject: [PATCH] BUG/MEDIUM: h2: also arm the h2 timeout when sending + +Right now the h2 idle timeout is only set when there is no stream. If we +fail to send because the socket buffers are full (generally indicating +the client has left), we also need to arm it so that we can properly +expire such connections, otherwise some failed transfers might leave +H2 connections pending forever. + +Thanks to Thierry Fournier for the diag and the traces. + +This patch needs to be backported to 1.8. 
+ +(cherry picked from commit 84b118f3120b3c61156f0ada12ae6456bd1a0b5a) +Signed-off-by: Willy Tarreau +--- + src/mux_h2.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/src/mux_h2.c b/src/mux_h2.c +index 4303a06..5446fd4 100644 +--- a/src/mux_h2.c ++++ b/src/mux_h2.c +@@ -2329,7 +2329,7 @@ static int h2_wake(struct connection *conn) + } + + if (h2c->task) { +- if (eb_is_empty(&h2c->streams_by_id)) { ++ if (eb_is_empty(&h2c->streams_by_id) || h2c->mbuf->o) { + h2c->task->expire = tick_add(now_ms, h2c->last_sid < 0 ? h2c->timeout : h2c->shut_timeout); + task_queue(h2c->task); + } +@@ -2501,7 +2501,7 @@ static void h2_detach(struct conn_stream *cs) + h2_release(h2c->conn); + } + else if (h2c->task) { +- if (eb_is_empty(&h2c->streams_by_id)) { ++ if (eb_is_empty(&h2c->streams_by_id) || h2c->mbuf->o) { + h2c->task->expire = tick_add(now_ms, h2c->last_sid < 0 ? h2c->timeout : h2c->shut_timeout); + task_queue(h2c->task); + } +-- +1.7.10.4 + diff --git a/net/haproxy/patches/0009-BUG-MEDIUM-fix-a-100-cpu-usage-with-cpu-map-and-nbthread-nbproc.patch b/net/haproxy/patches/0009-BUG-MEDIUM-fix-a-100-cpu-usage-with-cpu-map-and-nbthread-nbproc.patch new file mode 100644 index 000000000..23a4028ca --- /dev/null +++ b/net/haproxy/patches/0009-BUG-MEDIUM-fix-a-100-cpu-usage-with-cpu-map-and-nbthread-nbproc.patch @@ -0,0 +1,55 @@ +From 5149cd3c7abad68ddb19a0a5b3b604786d5f1b95 Mon Sep 17 00:00:00 2001 +From: =?utf8?q?Cyril=20Bont=C3=A9?= +Date: Mon, 12 Mar 2018 21:47:39 +0100 +Subject: [PATCH] BUG/MEDIUM: fix a 100% cpu usage with cpu-map and + nbthread/nbproc + +Krishna Kumar reported a 100% cpu usage with a configuration using +cpu-map and a high number of threads, + +Indeed, this minimal configuration to reproduce the issue : + global + nbthread 40 + cpu-map auto:1/1-40 0-39 + + frontend test + bind :8000 + +This is due to a wrong type in a shift operator (int vs unsigned long int), +causing an endless loop while applying the cpu affinity on threads. The same +issue may also occur with nbproc under FreeBSD. This commit addresses both +cases. + +This patch must be backported to 1.8. 
+ +(cherry picked from commit d400ab3a369523538c426cb70e059954c76b69c3) +Signed-off-by: Willy Tarreau +--- + src/haproxy.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/src/haproxy.c b/src/haproxy.c +index 09f7b5e..7d6e019 100644 +--- a/src/haproxy.c ++++ b/src/haproxy.c +@@ -2838,7 +2838,7 @@ int main(int argc, char **argv) + CPU_ZERO(&cpuset); + while ((i = ffsl(cpu_map)) > 0) { + CPU_SET(i - 1, &cpuset); +- cpu_map &= ~(1 << (i - 1)); ++ cpu_map &= ~(1UL << (i - 1)); + } + ret = cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, sizeof(cpuset), &cpuset); + } +@@ -3038,7 +3038,7 @@ int main(int argc, char **argv) + + while ((j = ffsl(cpu_map)) > 0) { + CPU_SET(j - 1, &cpuset); +- cpu_map &= ~(1 << (j - 1)); ++ cpu_map &= ~(1UL << (j - 1)); + } + pthread_setaffinity_np(threads[i], + sizeof(cpuset), &cpuset); +-- +1.7.10.4 + diff --git a/net/haproxy/patches/0010-BUG-MEDIUM-spoe-Remove-idle-applets-from-idle-list-when-HAProxy-is-stopping.patch b/net/haproxy/patches/0010-BUG-MEDIUM-spoe-Remove-idle-applets-from-idle-list-when-HAProxy-is-stopping.patch new file mode 100644 index 000000000..ca714224b --- /dev/null +++ b/net/haproxy/patches/0010-BUG-MEDIUM-spoe-Remove-idle-applets-from-idle-list-when-HAProxy-is-stopping.patch @@ -0,0 +1,43 @@ +From 7034083b5063d28276b986d645d18071aba5f4d5 Mon Sep 17 00:00:00 2001 +From: Christopher Faulet +Date: Wed, 28 Feb 2018 13:33:26 +0100 +Subject: [PATCH] BUG/MEDIUM: spoe: Remove idle applets from idle list when + HAProxy is stopping + +In the SPOE applet's handler, when an applet is switched from the state IDLE to +PROCESSING, it is removed for the list of idle applets. But when HAProxy is +stopping, this applet can be switched to DISCONNECT. In this case, we also need +to remove it from the list of idle applets. Else the applet is removed but still +present in the list. It could lead to a segmentation fault or an infinite loop, +depending the code path. 
+ +(cherry picked from commit 7d9f1ba246055046eed547fa35aa546683021dce) +[wt: adapted context for 1.8] +Signed-off-by: Willy Tarreau +--- + src/flt_spoe.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/flt_spoe.c b/src/flt_spoe.c +index 8fb6e0b..e76a352 100644 +--- a/src/flt_spoe.c ++++ b/src/flt_spoe.c +@@ -1866,6 +1866,7 @@ spoe_handle_appctx(struct appctx *appctx) + goto switchstate; + + case SPOE_APPCTX_ST_IDLE: ++ agent->rt[tid].applets_idle--; + if (stopping && + LIST_ISEMPTY(&agent->rt[tid].sending_queue) && + LIST_ISEMPTY(&SPOE_APPCTX(appctx)->waiting_queue)) { +@@ -1874,7 +1875,6 @@ spoe_handle_appctx(struct appctx *appctx) + appctx->st0 = SPOE_APPCTX_ST_DISCONNECT; + goto switchstate; + } +- agent->rt[tid].applets_idle--; + appctx->st0 = SPOE_APPCTX_ST_PROCESSING; + /* fall through */ + +-- +1.7.10.4 + From 3bcc1fb602dbf82c3520b00cc07a85dcadbe9fc9 Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Fri, 23 Mar 2018 20:09:30 +0100 Subject: [PATCH 03/13] haproxy: Update HAProxy to v1.8.5 - Update haproxy download URL and hash - Remove all already included patches Signed-off-by: Christian Lachner --- net/haproxy/Makefile | 6 +- ...t-SSL_ERROR_SYSCALL-as-unrecovarable.patch | 61 ----------- ...ion-for-reading-on-SSL_ERROR_SYSCALL.patch | 63 ----------- ...n-tunnel-mode-as-earlier-as-possible.patch | 69 ------------ ...le-ssl_bc_-fetch-keywords-are-broken.patch | 103 ------------------ ...ing-data-after-end-of-output-buffers.patch | 71 ------------ ...r-Fix-the-wrapping-case-in-bo_putblk.patch | 33 ------ ...r-Fix-the-wrapping-case-in-bi_putblk.patch | 33 ------ ...also-arm-the-h2-timeout-when-sending.patch | 46 -------- ...age-with-cpu-map-and-nbthread-nbproc.patch | 55 ---------- ...m-idle-list-when-HAProxy-is-stopping.patch | 43 -------- 11 files changed, 3 insertions(+), 580 deletions(-) delete mode 100644 net/haproxy/patches/0001-BUG-MEDIUM-ssl-Dont-always-treat-SSL_ERROR_SYSCALL-as-unrecovarable.patch delete mode 100644 net/haproxy/patches/0002-BUG-MEDIUM-ssl-Shutdown-the-connection-for-reading-on-SSL_ERROR_SYSCALL.patch delete mode 100644 net/haproxy/patches/0003-BUG-MEDIUM-http-Switch-the-HTTP-response-in-tunnel-mode-as-earlier-as-possible.patch delete mode 100644 net/haproxy/patches/0004-BUG-MEDIUM-ssl-sample-ssl_bc_-fetch-keywords-are-broken.patch delete mode 100644 net/haproxy/patches/0005-BUG-MEDIUM-h2-always-consume-any-trailing-data-after-end-of-output-buffers.patch delete mode 100644 net/haproxy/patches/0006-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bo_putblk.patch delete mode 100644 net/haproxy/patches/0007-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bi_putblk.patch delete mode 100644 net/haproxy/patches/0008-BUG-MEDIUM-h2-also-arm-the-h2-timeout-when-sending.patch delete mode 100644 net/haproxy/patches/0009-BUG-MEDIUM-fix-a-100-cpu-usage-with-cpu-map-and-nbthread-nbproc.patch delete mode 100644 net/haproxy/patches/0010-BUG-MEDIUM-spoe-Remove-idle-applets-from-idle-list-when-HAProxy-is-stopping.patch diff --git a/net/haproxy/Makefile b/net/haproxy/Makefile index 0a37b8f38..b975fe708 100644 --- a/net/haproxy/Makefile +++ b/net/haproxy/Makefile @@ -9,12 +9,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=haproxy -PKG_VERSION:=1.8.4 -PKG_RELEASE:=02 +PKG_VERSION:=1.8.5 +PKG_RELEASE:=01 PKG_SOURCE:=haproxy-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://www.haproxy.org/download/1.8/src/ -PKG_HASH:=e305b0a4e7dec08072841eef6ac6dcd1b5586b1eff09c2d51e152a912e8884a6 +PKG_HASH:=1c22083fa85332d5ab1c9aa8a7ec47a28d87ad9d802558808f9921d938ba20c9 
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION) PKG_LICENSE:=GPL-2.0 diff --git a/net/haproxy/patches/0001-BUG-MEDIUM-ssl-Dont-always-treat-SSL_ERROR_SYSCALL-as-unrecovarable.patch b/net/haproxy/patches/0001-BUG-MEDIUM-ssl-Dont-always-treat-SSL_ERROR_SYSCALL-as-unrecovarable.patch deleted file mode 100644 index 93b51dc40..000000000 --- a/net/haproxy/patches/0001-BUG-MEDIUM-ssl-Dont-always-treat-SSL_ERROR_SYSCALL-as-unrecovarable.patch +++ /dev/null @@ -1,61 +0,0 @@ -From 2fcd544272a5498ffa49544e9f06b51bc93e55d1 Mon Sep 17 00:00:00 2001 -From: Olivier Houchard -Date: Tue, 13 Feb 2018 15:17:23 +0100 -Subject: [PATCH] BUG/MEDIUM: ssl: Don't always treat SSL_ERROR_SYSCALL as - unrecovarable. - -Bart Geesink reported some random errors appearing under the form of -termination flags SD in the logs for connections involving SSL traffic -to reach the servers. - -Tomek Gacek and Mateusz Malek finally narrowed down the problem to commit -c2aae74 ("MEDIUM: ssl: Handle early data with OpenSSL 1.1.1"). It happens -that the special case of SSL_ERROR_SYSCALL isn't handled anymore since -this commit. - -SSL_read() might return <= 0, and SSL_get_erro() return SSL_ERROR_SYSCALL, -without meaning the connection is gone. Before flagging the connection -as in error, check the errno value. - -This should be backported to 1.8. - -(cherry picked from commit 7e2e505006feb8f3b4a7f9e0ac5e89b5a8c4895e) -Signed-off-by: Willy Tarreau ---- - src/ssl_sock.c | 9 ++++++++- - 1 file changed, 8 insertions(+), 1 deletion(-) - -diff --git a/src/ssl_sock.c b/src/ssl_sock.c -index aecf3dd..f118724 100644 ---- a/src/ssl_sock.c -+++ b/src/ssl_sock.c -@@ -5437,6 +5437,12 @@ static int ssl_sock_to_buf(struct connection *conn, struct buffer *buf, int coun - break; - } else if (ret == SSL_ERROR_ZERO_RETURN) - goto read0; -+ /* For SSL_ERROR_SYSCALL, make sure the error is -+ * unrecoverable before flagging the connection as -+ * in error. -+ */ -+ if (ret == SSL_ERROR_SYSCALL && (!errno || errno == EAGAIN)) -+ goto clear_ssl_error; - /* otherwise it's a real error */ - goto out_error; - } -@@ -5451,11 +5457,12 @@ static int ssl_sock_to_buf(struct connection *conn, struct buffer *buf, int coun - conn_sock_read0(conn); - goto leave; - out_error: -+ conn->flags |= CO_FL_ERROR; -+clear_ssl_error: - /* Clear openssl global errors stack */ - ssl_sock_dump_errors(conn); - ERR_clear_error(); - -- conn->flags |= CO_FL_ERROR; - goto leave; - } - --- -1.7.10.4 - diff --git a/net/haproxy/patches/0002-BUG-MEDIUM-ssl-Shutdown-the-connection-for-reading-on-SSL_ERROR_SYSCALL.patch b/net/haproxy/patches/0002-BUG-MEDIUM-ssl-Shutdown-the-connection-for-reading-on-SSL_ERROR_SYSCALL.patch deleted file mode 100644 index 22274d366..000000000 --- a/net/haproxy/patches/0002-BUG-MEDIUM-ssl-Shutdown-the-connection-for-reading-on-SSL_ERROR_SYSCALL.patch +++ /dev/null @@ -1,63 +0,0 @@ -From f7fa1d461aa71bbc8a6c23fdcfc305f2e52ce5dd Mon Sep 17 00:00:00 2001 -From: Christopher Faulet -Date: Mon, 19 Feb 2018 14:25:15 +0100 -Subject: [PATCH] BUG/MEDIUM: ssl: Shutdown the connection for reading on - SSL_ERROR_SYSCALL - -When SSL_read returns SSL_ERROR_SYSCALL and errno is unset or set to EAGAIN, the -connection must be shut down for reading. Else, the connection loops infinitly, -consuming all the CPU. - -The bug was introduced in the commit 7e2e50500 ("BUG/MEDIUM: ssl: Don't always -treat SSL_ERROR_SYSCALL as unrecovarable."). This patch must be backported in -1.8 too. 
- -(cherry picked from commit 4ac77a98cda3d0f9b1d9de7bbbda2c91357f0767) -Signed-off-by: Willy Tarreau ---- - src/ssl_sock.c | 14 ++++++++------ - 1 file changed, 8 insertions(+), 6 deletions(-) - -diff --git a/src/ssl_sock.c b/src/ssl_sock.c -index f118724..a065bbb 100644 ---- a/src/ssl_sock.c -+++ b/src/ssl_sock.c -@@ -5437,10 +5437,9 @@ static int ssl_sock_to_buf(struct connection *conn, struct buffer *buf, int coun - break; - } else if (ret == SSL_ERROR_ZERO_RETURN) - goto read0; -- /* For SSL_ERROR_SYSCALL, make sure the error is -- * unrecoverable before flagging the connection as -- * in error. -- */ -+ /* For SSL_ERROR_SYSCALL, make sure to clear the error -+ * stack before shutting down the connection for -+ * reading. */ - if (ret == SSL_ERROR_SYSCALL && (!errno || errno == EAGAIN)) - goto clear_ssl_error; - /* otherwise it's a real error */ -@@ -5453,16 +5452,19 @@ static int ssl_sock_to_buf(struct connection *conn, struct buffer *buf, int coun - conn_cond_update_sock_polling(conn); - return done; - -+ clear_ssl_error: -+ /* Clear openssl global errors stack */ -+ ssl_sock_dump_errors(conn); -+ ERR_clear_error(); - read0: - conn_sock_read0(conn); - goto leave; -+ - out_error: - conn->flags |= CO_FL_ERROR; --clear_ssl_error: - /* Clear openssl global errors stack */ - ssl_sock_dump_errors(conn); - ERR_clear_error(); -- - goto leave; - } - --- -1.7.10.4 - diff --git a/net/haproxy/patches/0003-BUG-MEDIUM-http-Switch-the-HTTP-response-in-tunnel-mode-as-earlier-as-possible.patch b/net/haproxy/patches/0003-BUG-MEDIUM-http-Switch-the-HTTP-response-in-tunnel-mode-as-earlier-as-possible.patch deleted file mode 100644 index 446a6107d..000000000 --- a/net/haproxy/patches/0003-BUG-MEDIUM-http-Switch-the-HTTP-response-in-tunnel-mode-as-earlier-as-possible.patch +++ /dev/null @@ -1,69 +0,0 @@ -From 8a5949f2d74c3a3a6c6da25449992c312b183ef3 Mon Sep 17 00:00:00 2001 -From: Christopher Faulet -Date: Fri, 2 Feb 2018 15:54:15 +0100 -Subject: [PATCH] BUG/MEDIUM: http: Switch the HTTP response in tunnel mode as - earlier as possible - -When the body length is undefined (no Content-Length or Transfer-Encoding -headers), The reponse remains in ending mode, waiting the request is done. So, -most of time this is not a problem because the resquest is done before the -response. But when a client sends data to a server that replies without waiting -all the data, it is really not desirable to wait the end of the request to -finish the response. - -This bug was introduced when the tunneling of the request and the reponse was -refactored, in commit 4be980391 ("MINOR: http: Switch requests/responses in -TUNNEL mode only by checking txn flag"). - -This patch should be backported in 1.8 and 1.7. - -(cherry picked from commit fd04fcf5edb0a24cd29ce8f4d4dc2aa3a0e2e82c) -Signed-off-by: Willy Tarreau ---- - src/proto_http.c | 15 +++++---------- - 1 file changed, 5 insertions(+), 10 deletions(-) - -diff --git a/src/proto_http.c b/src/proto_http.c -index 64bd410..29880ea 100644 ---- a/src/proto_http.c -+++ b/src/proto_http.c -@@ -4634,16 +4634,8 @@ int http_sync_res_state(struct stream *s) - * let's enforce it now that we're not expecting any new - * data to come. The caller knows the stream is complete - * once both states are CLOSED. -- * -- * However, there is an exception if the response length -- * is undefined. In this case, we switch in TUNNEL mode. 
- */ -- if (!(txn->rsp.flags & HTTP_MSGF_XFER_LEN)) { -- channel_auto_read(chn); -- txn->rsp.msg_state = HTTP_MSG_TUNNEL; -- chn->flags |= CF_NEVER_WAIT; -- } -- else if (!(chn->flags & (CF_SHUTW|CF_SHUTW_NOW))) { -+ if (!(chn->flags & (CF_SHUTW|CF_SHUTW_NOW))) { - channel_shutr_now(chn); - channel_shutw_now(chn); - } -@@ -6241,6 +6233,8 @@ http_msg_forward_body(struct stream *s, struct http_msg *msg) - /* The server still sending data that should be filtered */ - if (!(chn->flags & CF_SHUTR) && HAS_DATA_FILTERS(s, chn)) - goto missing_data_or_waiting; -+ msg->msg_state = HTTP_MSG_TUNNEL; -+ goto ending; - } - - msg->msg_state = HTTP_MSG_ENDING; -@@ -6262,7 +6256,8 @@ http_msg_forward_body(struct stream *s, struct http_msg *msg) - /* default_ret */ 1, - /* on_error */ goto error, - /* on_wait */ goto waiting); -- msg->msg_state = HTTP_MSG_DONE; -+ if (msg->msg_state == HTTP_MSG_ENDING) -+ msg->msg_state = HTTP_MSG_DONE; - return 1; - - missing_data_or_waiting: --- -1.7.10.4 - diff --git a/net/haproxy/patches/0004-BUG-MEDIUM-ssl-sample-ssl_bc_-fetch-keywords-are-broken.patch b/net/haproxy/patches/0004-BUG-MEDIUM-ssl-sample-ssl_bc_-fetch-keywords-are-broken.patch deleted file mode 100644 index 11d2ef9c0..000000000 --- a/net/haproxy/patches/0004-BUG-MEDIUM-ssl-sample-ssl_bc_-fetch-keywords-are-broken.patch +++ /dev/null @@ -1,103 +0,0 @@ -From 7ccf7c9791f2b2329f3940d1347618af3a77bebc Mon Sep 17 00:00:00 2001 -From: Emeric Brun -Date: Mon, 19 Feb 2018 15:59:48 +0100 -Subject: [PATCH] BUG/MEDIUM: ssl/sample: ssl_bc_* fetch keywords are broken. - -Since the split between connections and conn-stream objects, this -keywords are broken. - -This patch must be backported in 1.8 - -(cherry picked from commit eb8def9f34c37537d56a69fcd211d4c4c8006bea) -Signed-off-by: Willy Tarreau ---- - src/ssl_sock.c | 31 ++++++++++++++----------------- - 1 file changed, 14 insertions(+), 17 deletions(-) - -diff --git a/src/ssl_sock.c b/src/ssl_sock.c -index 4d0d5db..d832d76 100644 ---- a/src/ssl_sock.c -+++ b/src/ssl_sock.c -@@ -6580,8 +6580,8 @@ smp_fetch_ssl_x_key_alg(const struct arg *args, struct sample *smp, const char * - static int - smp_fetch_ssl_fc(const struct arg *args, struct sample *smp, const char *kw, void *private) - { -- struct connection *conn = objt_conn((kw[4] != 'b') ? smp->sess->origin : -- smp->strm ? smp->strm->si[1].end : NULL); -+ struct connection *conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) : -+ smp->strm ? cs_conn(objt_cs(smp->strm->si[1].end)) : NULL; - - smp->data.type = SMP_T_BOOL; - smp->data.u.sint = (conn && conn->xprt == &ssl_sock); -@@ -6625,8 +6625,8 @@ smp_fetch_ssl_fc_is_resumed(const struct arg *args, struct sample *smp, const ch - static int - smp_fetch_ssl_fc_cipher(const struct arg *args, struct sample *smp, const char *kw, void *private) - { -- struct connection *conn = objt_conn((kw[4] != 'b') ? smp->sess->origin : -- smp->strm ? smp->strm->si[1].end : NULL); -+ struct connection *conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) : -+ smp->strm ? cs_conn(objt_cs(smp->strm->si[1].end)) : NULL; - - smp->flags = 0; - if (!conn || !conn->xprt_ctx || conn->xprt != &ssl_sock) -@@ -6651,9 +6651,8 @@ smp_fetch_ssl_fc_cipher(const struct arg *args, struct sample *smp, const char * - static int - smp_fetch_ssl_fc_alg_keysize(const struct arg *args, struct sample *smp, const char *kw, void *private) - { -- struct connection *conn = objt_conn((kw[4] != 'b') ? smp->sess->origin : -- smp->strm ? smp->strm->si[1].end : NULL); -- -+ struct connection *conn = (kw[4] != 'b') ? 
objt_conn(smp->sess->origin) : -+ smp->strm ? cs_conn(objt_cs(smp->strm->si[1].end)) : NULL; - int sint; - - smp->flags = 0; -@@ -6676,8 +6675,8 @@ smp_fetch_ssl_fc_alg_keysize(const struct arg *args, struct sample *smp, const c - static int - smp_fetch_ssl_fc_use_keysize(const struct arg *args, struct sample *smp, const char *kw, void *private) - { -- struct connection *conn = objt_conn((kw[4] != 'b') ? smp->sess->origin : -- smp->strm ? smp->strm->si[1].end : NULL); -+ struct connection *conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) : -+ smp->strm ? cs_conn(objt_cs(smp->strm->si[1].end)) : NULL; - - smp->flags = 0; - if (!conn || !conn->xprt_ctx || conn->xprt != &ssl_sock) -@@ -6747,8 +6746,8 @@ smp_fetch_ssl_fc_alpn(const struct arg *args, struct sample *smp, const char *kw - static int - smp_fetch_ssl_fc_protocol(const struct arg *args, struct sample *smp, const char *kw, void *private) - { -- struct connection *conn = objt_conn((kw[4] != 'b') ? smp->sess->origin : -- smp->strm ? smp->strm->si[1].end : NULL); -+ struct connection *conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) : -+ smp->strm ? cs_conn(objt_cs(smp->strm->si[1].end)) : NULL; - - smp->flags = 0; - if (!conn || !conn->xprt_ctx || conn->xprt != &ssl_sock) -@@ -6773,9 +6772,8 @@ static int - smp_fetch_ssl_fc_session_id(const struct arg *args, struct sample *smp, const char *kw, void *private) - { - #if OPENSSL_VERSION_NUMBER > 0x0090800fL -- struct connection *conn = objt_conn((kw[4] != 'b') ? smp->sess->origin : -- smp->strm ? smp->strm->si[1].end : NULL); -- -+ struct connection *conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) : -+ smp->strm ? cs_conn(objt_cs(smp->strm->si[1].end)) : NULL; - SSL_SESSION *ssl_sess; - - smp->flags = SMP_F_CONST; -@@ -6917,9 +6915,8 @@ static int - smp_fetch_ssl_fc_unique_id(const struct arg *args, struct sample *smp, const char *kw, void *private) - { - #if OPENSSL_VERSION_NUMBER > 0x0090800fL -- struct connection *conn = objt_conn((kw[4] != 'b') ? smp->sess->origin : -- smp->strm ? smp->strm->si[1].end : NULL); -- -+ struct connection *conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) : -+ smp->strm ? cs_conn(objt_cs(smp->strm->si[1].end)) : NULL; - int finished_len; - struct chunk *finished_trash; - --- -1.7.10.4 - diff --git a/net/haproxy/patches/0005-BUG-MEDIUM-h2-always-consume-any-trailing-data-after-end-of-output-buffers.patch b/net/haproxy/patches/0005-BUG-MEDIUM-h2-always-consume-any-trailing-data-after-end-of-output-buffers.patch deleted file mode 100644 index 2ed041f39..000000000 --- a/net/haproxy/patches/0005-BUG-MEDIUM-h2-always-consume-any-trailing-data-after-end-of-output-buffers.patch +++ /dev/null @@ -1,71 +0,0 @@ -From 6fc36785addd45cc76a029a023296def53cff135 Mon Sep 17 00:00:00 2001 -From: Willy Tarreau -Date: Tue, 27 Feb 2018 15:37:25 +0100 -Subject: [PATCH] BUG/MEDIUM: h2: always consume any trailing data after end - of output buffers - -In case a stream tries to emit more data than advertised by the chunks -or content-length headers, the extra data remains in the channel's output -buffer until the channel's timeout expires. It can easily happen when -sending malformed error files making use of a wrong content-length or -having extra CRLFs after the empty chunk. It may also be possible to -forge such a bad response using Lua. - -The H1 to H2 encoder must protect itself against this by marking the data -presented to it as consumed if it decides to discard them, so that the -sending stream doesn't wait for the timeout to trigger. 
- -The visible effect of this problem is a huge memory usage and a high -concurrent connection count during benchmarks when using such bad data -(a typical place where this easily happens). - -This fix must be backported to 1.8. - -(cherry picked from commit 35a62705df65632e2717ae0d20a93e0cb3f8f163) -Signed-off-by: Willy Tarreau ---- - src/mux_h2.c | 13 ++++++++++++- - 1 file changed, 12 insertions(+), 1 deletion(-) - -diff --git a/src/mux_h2.c b/src/mux_h2.c -index caae041..4303a06 100644 ---- a/src/mux_h2.c -+++ b/src/mux_h2.c -@@ -3020,6 +3020,9 @@ static int h2s_frt_make_resp_headers(struct h2s *h2s, struct buffer *buf) - * body or directly end in TRL2. - */ - if (es_now) { -+ // trim any possibly pending data (eg: inconsistent content-length) -+ bo_del(buf, buf->o); -+ - h1m->state = HTTP_MSG_DONE; - h2s->flags |= H2_SF_ES_SENT; - if (h2s->st == H2_SS_OPEN) -@@ -3269,8 +3272,12 @@ static int h2s_frt_make_resp_data(struct h2s *h2s, struct buffer *buf) - else - h2c_stream_close(h2c, h2s); - -- if (!(h1m->flags & H1_MF_CHNK)) -+ if (!(h1m->flags & H1_MF_CHNK)) { -+ // trim any possibly pending data (eg: inconsistent content-length) -+ bo_del(buf, buf->o); -+ - h1m->state = HTTP_MSG_DONE; -+ } - - h2s->flags |= H2_SF_ES_SENT; - } -@@ -3319,6 +3326,10 @@ static int h2_snd_buf(struct conn_stream *cs, struct buffer *buf, int flags) - } - total += count; - bo_del(buf, count); -+ -+ // trim any possibly pending data (eg: extra CR-LF, ...) -+ bo_del(buf, buf->o); -+ - h2s->res.state = HTTP_MSG_DONE; - break; - } --- -1.7.10.4 - diff --git a/net/haproxy/patches/0006-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bo_putblk.patch b/net/haproxy/patches/0006-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bo_putblk.patch deleted file mode 100644 index 94eec1f6b..000000000 --- a/net/haproxy/patches/0006-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bo_putblk.patch +++ /dev/null @@ -1,33 +0,0 @@ -From fefb8592821ff0fa56f435c581d6e92e563e7ad7 Mon Sep 17 00:00:00 2001 -From: Christopher Faulet -Date: Mon, 26 Feb 2018 10:47:03 +0100 -Subject: [PATCH] BUG/MEDIUM: buffer: Fix the wrapping case in bo_putblk - -When the block of data need to be split to support the wrapping, the start of -the second block of data was wrong. We must be sure to skip data copied during -the first memcpy. - -This patch must be backported to 1.8, 1.7, 1.6 and 1.5. 
- -(cherry picked from commit b2b279464c5c0f3dfadf02333e06eb0ae8ae8793) -Signed-off-by: Willy Tarreau ---- - include/common/buffer.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/include/common/buffer.h b/include/common/buffer.h -index 976085e..ae9aafd 100644 ---- a/include/common/buffer.h -+++ b/include/common/buffer.h -@@ -468,7 +468,7 @@ static inline int bo_putblk(struct buffer *b, const char *blk, int len) - memcpy(b->p, blk, half); - b->p = b_ptr(b, half); - if (len > half) { -- memcpy(b->p, blk, len - half); -+ memcpy(b->p, blk + half, len - half); - b->p = b_ptr(b, half); - } - b->o += len; --- -1.7.10.4 - diff --git a/net/haproxy/patches/0007-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bi_putblk.patch b/net/haproxy/patches/0007-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bi_putblk.patch deleted file mode 100644 index 93ca220a9..000000000 --- a/net/haproxy/patches/0007-BUG-MEDIUM-buffer-Fix-the-wrapping-case-in-bi_putblk.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 14f325000b91649b9d117c4d53d6b194ed3c7b11 Mon Sep 17 00:00:00 2001 -From: Christopher Faulet -Date: Mon, 26 Feb 2018 10:51:28 +0100 -Subject: [PATCH] BUG/MEDIUM: buffer: Fix the wrapping case in bi_putblk - -When the block of data need to be split to support the wrapping, the start of -the second block of data was wrong. We must be sure to skup data copied during -the first memcpy. - -This patch must be backported to 1.8. - -(cherry picked from commit ca6ef506610e9d78f99b7ab2095ce0f8a47e18df) -Signed-off-by: Willy Tarreau ---- - include/common/buffer.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/include/common/buffer.h b/include/common/buffer.h -index ae9aafd..0e63913 100644 ---- a/include/common/buffer.h -+++ b/include/common/buffer.h -@@ -577,7 +577,7 @@ static inline int bi_putblk(struct buffer *b, const char *blk, int len) - - memcpy(bi_end(b), blk, half); - if (len > half) -- memcpy(b_ptr(b, b->i + half), blk, len - half); -+ memcpy(b_ptr(b, b->i + half), blk + half, len - half); - b->i += len; - return len; - } --- -1.7.10.4 - diff --git a/net/haproxy/patches/0008-BUG-MEDIUM-h2-also-arm-the-h2-timeout-when-sending.patch b/net/haproxy/patches/0008-BUG-MEDIUM-h2-also-arm-the-h2-timeout-when-sending.patch deleted file mode 100644 index 9096eb15a..000000000 --- a/net/haproxy/patches/0008-BUG-MEDIUM-h2-also-arm-the-h2-timeout-when-sending.patch +++ /dev/null @@ -1,46 +0,0 @@ -From ccfb5d755f1708f890b197375d962d8c938e78bd Mon Sep 17 00:00:00 2001 -From: Willy Tarreau -Date: Mon, 5 Mar 2018 16:10:54 +0100 -Subject: [PATCH] BUG/MEDIUM: h2: also arm the h2 timeout when sending - -Right now the h2 idle timeout is only set when there is no stream. If we -fail to send because the socket buffers are full (generally indicating -the client has left), we also need to arm it so that we can properly -expire such connections, otherwise some failed transfers might leave -H2 connections pending forever. - -Thanks to Thierry Fournier for the diag and the traces. - -This patch needs to be backported to 1.8. 
- -(cherry picked from commit 84b118f3120b3c61156f0ada12ae6456bd1a0b5a) -Signed-off-by: Willy Tarreau ---- - src/mux_h2.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/src/mux_h2.c b/src/mux_h2.c -index 4303a06..5446fd4 100644 ---- a/src/mux_h2.c -+++ b/src/mux_h2.c -@@ -2329,7 +2329,7 @@ static int h2_wake(struct connection *conn) - } - - if (h2c->task) { -- if (eb_is_empty(&h2c->streams_by_id)) { -+ if (eb_is_empty(&h2c->streams_by_id) || h2c->mbuf->o) { - h2c->task->expire = tick_add(now_ms, h2c->last_sid < 0 ? h2c->timeout : h2c->shut_timeout); - task_queue(h2c->task); - } -@@ -2501,7 +2501,7 @@ static void h2_detach(struct conn_stream *cs) - h2_release(h2c->conn); - } - else if (h2c->task) { -- if (eb_is_empty(&h2c->streams_by_id)) { -+ if (eb_is_empty(&h2c->streams_by_id) || h2c->mbuf->o) { - h2c->task->expire = tick_add(now_ms, h2c->last_sid < 0 ? h2c->timeout : h2c->shut_timeout); - task_queue(h2c->task); - } --- -1.7.10.4 - diff --git a/net/haproxy/patches/0009-BUG-MEDIUM-fix-a-100-cpu-usage-with-cpu-map-and-nbthread-nbproc.patch b/net/haproxy/patches/0009-BUG-MEDIUM-fix-a-100-cpu-usage-with-cpu-map-and-nbthread-nbproc.patch deleted file mode 100644 index 23a4028ca..000000000 --- a/net/haproxy/patches/0009-BUG-MEDIUM-fix-a-100-cpu-usage-with-cpu-map-and-nbthread-nbproc.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 5149cd3c7abad68ddb19a0a5b3b604786d5f1b95 Mon Sep 17 00:00:00 2001 -From: =?utf8?q?Cyril=20Bont=C3=A9?= -Date: Mon, 12 Mar 2018 21:47:39 +0100 -Subject: [PATCH] BUG/MEDIUM: fix a 100% cpu usage with cpu-map and - nbthread/nbproc - -Krishna Kumar reported a 100% cpu usage with a configuration using -cpu-map and a high number of threads, - -Indeed, this minimal configuration to reproduce the issue : - global - nbthread 40 - cpu-map auto:1/1-40 0-39 - - frontend test - bind :8000 - -This is due to a wrong type in a shift operator (int vs unsigned long int), -causing an endless loop while applying the cpu affinity on threads. The same -issue may also occur with nbproc under FreeBSD. This commit addresses both -cases. - -This patch must be backported to 1.8. 
- -(cherry picked from commit d400ab3a369523538c426cb70e059954c76b69c3) -Signed-off-by: Willy Tarreau ---- - src/haproxy.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/src/haproxy.c b/src/haproxy.c -index 09f7b5e..7d6e019 100644 ---- a/src/haproxy.c -+++ b/src/haproxy.c -@@ -2838,7 +2838,7 @@ int main(int argc, char **argv) - CPU_ZERO(&cpuset); - while ((i = ffsl(cpu_map)) > 0) { - CPU_SET(i - 1, &cpuset); -- cpu_map &= ~(1 << (i - 1)); -+ cpu_map &= ~(1UL << (i - 1)); - } - ret = cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, sizeof(cpuset), &cpuset); - } -@@ -3038,7 +3038,7 @@ int main(int argc, char **argv) - - while ((j = ffsl(cpu_map)) > 0) { - CPU_SET(j - 1, &cpuset); -- cpu_map &= ~(1 << (j - 1)); -+ cpu_map &= ~(1UL << (j - 1)); - } - pthread_setaffinity_np(threads[i], - sizeof(cpuset), &cpuset); --- -1.7.10.4 - diff --git a/net/haproxy/patches/0010-BUG-MEDIUM-spoe-Remove-idle-applets-from-idle-list-when-HAProxy-is-stopping.patch b/net/haproxy/patches/0010-BUG-MEDIUM-spoe-Remove-idle-applets-from-idle-list-when-HAProxy-is-stopping.patch deleted file mode 100644 index ca714224b..000000000 --- a/net/haproxy/patches/0010-BUG-MEDIUM-spoe-Remove-idle-applets-from-idle-list-when-HAProxy-is-stopping.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 7034083b5063d28276b986d645d18071aba5f4d5 Mon Sep 17 00:00:00 2001 -From: Christopher Faulet -Date: Wed, 28 Feb 2018 13:33:26 +0100 -Subject: [PATCH] BUG/MEDIUM: spoe: Remove idle applets from idle list when - HAProxy is stopping - -In the SPOE applet's handler, when an applet is switched from the state IDLE to -PROCESSING, it is removed for the list of idle applets. But when HAProxy is -stopping, this applet can be switched to DISCONNECT. In this case, we also need -to remove it from the list of idle applets. Else the applet is removed but still -present in the list. It could lead to a segmentation fault or an infinite loop, -depending the code path. 
- -(cherry picked from commit 7d9f1ba246055046eed547fa35aa546683021dce) -[wt: adapted context for 1.8] -Signed-off-by: Willy Tarreau ---- - src/flt_spoe.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/flt_spoe.c b/src/flt_spoe.c -index 8fb6e0b..e76a352 100644 ---- a/src/flt_spoe.c -+++ b/src/flt_spoe.c -@@ -1866,6 +1866,7 @@ spoe_handle_appctx(struct appctx *appctx) - goto switchstate; - - case SPOE_APPCTX_ST_IDLE: -+ agent->rt[tid].applets_idle--; - if (stopping && - LIST_ISEMPTY(&agent->rt[tid].sending_queue) && - LIST_ISEMPTY(&SPOE_APPCTX(appctx)->waiting_queue)) { -@@ -1874,7 +1875,6 @@ spoe_handle_appctx(struct appctx *appctx) - appctx->st0 = SPOE_APPCTX_ST_DISCONNECT; - goto switchstate; - } -- agent->rt[tid].applets_idle--; - appctx->st0 = SPOE_APPCTX_ST_PROCESSING; - /* fall through */ - --- -1.7.10.4 - From 17d73b0dc19fb82852b518bc1c9678479aa02668 Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Fri, 30 Mar 2018 11:10:46 +0200 Subject: [PATCH 04/13] haproxy: Update MEDIUM+ patches for HAProxy v1.8.5 - Add new MEDIUM+ patches (see https://www.haproxy.org/bugs/bugs-1.8.5.html) - Raise patch-level to 02 Signed-off-by: Christian Lachner --- net/haproxy/Makefile | 2 +- ...eading-h2c_stream_close-to-h2s_close.patch | 167 ++++++++++++++++++ ...vide-and-use-h2s_detach-and-h2s_free.patch | 109 ++++++++++++ ...ms-from-the-send-list-before-closing.patch | 71 ++++++++ ...the-task-outside-of-the-task-handler.patch | 91 ++++++++++ ...-on-detach-if-connection-is-in-error.patch | 39 ++++ 6 files changed, 478 insertions(+), 1 deletion(-) create mode 100755 net/haproxy/patches/0001-CLEANUP-h2-rename-misleading-h2c_stream_close-to-h2s_close.patch create mode 100755 net/haproxy/patches/0002-MINOR-h2-provide-and-use-h2s_detach-and-h2s_free.patch create mode 100755 net/haproxy/patches/0003-BUG-MAJOR-h2-remove-orphaned-streams-from-the-send-list-before-closing.patch create mode 100755 net/haproxy/patches/0004-BUG-MEDIUM-h2-threads-never-release-the-task-outside-of-the-task-handler.patch create mode 100755 net/haproxy/patches/0005-BUG-MEDIUM-h2-dont-consider-pending-data-on-detach-if-connection-is-in-error.patch diff --git a/net/haproxy/Makefile b/net/haproxy/Makefile index b975fe708..5ee0f9eb6 100644 --- a/net/haproxy/Makefile +++ b/net/haproxy/Makefile @@ -10,7 +10,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=haproxy PKG_VERSION:=1.8.5 -PKG_RELEASE:=01 +PKG_RELEASE:=02 PKG_SOURCE:=haproxy-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://www.haproxy.org/download/1.8/src/ diff --git a/net/haproxy/patches/0001-CLEANUP-h2-rename-misleading-h2c_stream_close-to-h2s_close.patch b/net/haproxy/patches/0001-CLEANUP-h2-rename-misleading-h2c_stream_close-to-h2s_close.patch new file mode 100755 index 000000000..87e3e49c4 --- /dev/null +++ b/net/haproxy/patches/0001-CLEANUP-h2-rename-misleading-h2c_stream_close-to-h2s_close.patch @@ -0,0 +1,167 @@ +From 27b2c5ead5cf85626d4169ab46b3246d65033b58 Mon Sep 17 00:00:00 2001 +From: Willy Tarreau +Date: Thu, 1 Mar 2018 16:31:34 +0100 +Subject: [PATCH] CLEANUP: h2: rename misleading h2c_stream_close() to + h2s_close() + +This function takes an h2c and an h2s but it never uses the h2c, which +is a bit confusing at some places in the code. Let's make it clear that +it only operates on the h2s instead by renaming it and removing the +unused h2c argument. 
+ +(cherry picked from commit 00dd07895a6ee856c811c6d60a8e3d4c7d973c63) +Signed-off-by: Willy Tarreau +--- + src/mux_h2.c | 37 ++++++++++++++++++------------------- + 1 file changed, 18 insertions(+), 19 deletions(-) + +diff --git a/src/mux_h2.c b/src/mux_h2.c +index bb0a3e3..0bb79a4 100644 +--- a/src/mux_h2.c ++++ b/src/mux_h2.c +@@ -630,12 +630,11 @@ static inline __maybe_unused int h2_get_frame_hdr(struct buffer *b, struct h2_fh + return ret; + } + +-/* marks stream as CLOSED for connection and decrement the number +- * of active streams for this connection if the stream was not yet closed. +- * Please use this exclusively before closing a stream to ensure stream count +- * is well maintained. ++/* marks stream as CLOSED and decrement the number of active streams for ++ * its connection if the stream was not yet closed. Please use this exclusively ++ * before closing a stream to ensure stream count is well maintained. + */ +-static inline void h2c_stream_close(struct h2c *h2c, struct h2s *h2s) ++static inline void h2s_close(struct h2s *h2s) + { + if (h2s->st != H2_SS_CLOSED) + h2s->h2c->nb_streams--; +@@ -924,7 +923,7 @@ static int h2s_send_rst_stream(struct h2c *h2c, struct h2s *h2s) + + ignore: + h2s->flags |= H2_SF_RST_SENT; +- h2c_stream_close(h2c, h2s); ++ h2s_close(h2s); + return ret; + } + +@@ -988,7 +987,7 @@ static int h2c_send_rst_stream(struct h2c *h2c, struct h2s *h2s) + ignore: + if (h2s->st > H2_SS_IDLE && h2s->st < H2_SS_CLOSED) { + h2s->flags |= H2_SF_RST_SENT; +- h2c_stream_close(h2c, h2s); ++ h2s_close(h2s); + } + + return ret; +@@ -1066,7 +1065,7 @@ static void h2_wake_some_streams(struct h2c *h2c, int last, uint32_t flags) + + if (!h2s->cs) { + /* this stream was already orphaned */ +- h2c_stream_close(h2c, h2s); ++ h2s_close(h2s); + eb32_delete(&h2s->by_id); + pool_free(pool_head_h2s, h2s); + continue; +@@ -1084,7 +1083,7 @@ static void h2_wake_some_streams(struct h2c *h2c, int last, uint32_t flags) + else if (flags & CS_FL_EOS && h2s->st == H2_SS_OPEN) + h2s->st = H2_SS_HREM; + else if (flags & CS_FL_EOS && h2s->st == H2_SS_HLOC) +- h2c_stream_close(h2c, h2s); ++ h2s_close(h2s); + } + } + +@@ -1551,7 +1550,7 @@ static int h2c_handle_rst_stream(struct h2c *h2c, struct h2s *h2s) + return 1; + + h2s->errcode = h2_get_n32(h2c->dbuf, 0); +- h2c_stream_close(h2c, h2s); ++ h2s_close(h2s); + + if (h2s->cs) { + h2s->cs->flags |= CS_FL_EOS | CS_FL_ERROR; +@@ -2099,7 +2098,7 @@ static int h2_process_mux(struct h2c *h2c) + h2s->cs->flags &= ~CS_FL_DATA_WR_ENA; + else { + /* just sent the last frame for this orphaned stream */ +- h2c_stream_close(h2c, h2s); ++ h2s_close(h2s); + eb32_delete(&h2s->by_id); + pool_free(pool_head_h2s, h2s); + } +@@ -2142,7 +2141,7 @@ static int h2_process_mux(struct h2c *h2c) + h2s->cs->flags &= ~CS_FL_DATA_WR_ENA; + else { + /* just sent the last frame for this orphaned stream */ +- h2c_stream_close(h2c, h2s); ++ h2s_close(h2s); + eb32_delete(&h2s->by_id); + pool_free(pool_head_h2s, h2s); + } +@@ -2501,7 +2500,7 @@ static void h2_detach(struct conn_stream *cs) + + if (h2s->by_id.node.leaf_p) { + /* h2s still attached to the h2c */ +- h2c_stream_close(h2c, h2s); ++ h2s_close(h2s); + eb32_delete(&h2s->by_id); + + /* We don't want to close right now unless we're removing the +@@ -2557,7 +2556,7 @@ static void h2_shutr(struct conn_stream *cs, enum cs_shr_mode mode) + if (h2s->h2c->mbuf->o && !(cs->conn->flags & CO_FL_XPRT_WR_ENA)) + conn_xprt_want_send(cs->conn); + +- h2c_stream_close(h2s->h2c, h2s); ++ h2s_close(h2s); + } + + static void 
h2_shutw(struct conn_stream *cs, enum cs_shw_mode mode) +@@ -2575,7 +2574,7 @@ static void h2_shutw(struct conn_stream *cs, enum cs_shw_mode mode) + return; + + if (h2s->st == H2_SS_HREM) +- h2c_stream_close(h2s->h2c, h2s); ++ h2s_close(h2s); + else + h2s->st = H2_SS_HLOC; + } else { +@@ -2593,7 +2592,7 @@ static void h2_shutw(struct conn_stream *cs, enum cs_shw_mode mode) + h2c_send_goaway_error(h2s->h2c, h2s) <= 0) + return; + +- h2c_stream_close(h2s->h2c, h2s); ++ h2s_close(h2s); + } + + if (h2s->h2c->mbuf->o && !(cs->conn->flags & CO_FL_XPRT_WR_ENA)) +@@ -3049,7 +3048,7 @@ static int h2s_frt_make_resp_headers(struct h2s *h2s, struct buffer *buf) + if (h2s->st == H2_SS_OPEN) + h2s->st = H2_SS_HLOC; + else +- h2c_stream_close(h2c, h2s); ++ h2s_close(h2s); + } + else if (h1m->status >= 100 && h1m->status < 200) { + /* we'll let the caller check if it has more headers to send */ +@@ -3291,7 +3290,7 @@ static int h2s_frt_make_resp_data(struct h2s *h2s, struct buffer *buf) + if (h2s->st == H2_SS_OPEN) + h2s->st = H2_SS_HLOC; + else +- h2c_stream_close(h2c, h2s); ++ h2s_close(h2s); + + if (!(h1m->flags & H1_MF_CHNK)) { + // trim any possibly pending data (eg: inconsistent content-length) +@@ -3364,7 +3363,7 @@ static int h2_snd_buf(struct conn_stream *cs, struct buffer *buf, int flags) + if (h2s->st == H2_SS_ERROR || h2s->flags & H2_SF_RST_RCVD) { + cs->flags |= CS_FL_ERROR; + if (h2s_send_rst_stream(h2s->h2c, h2s) > 0) +- h2c_stream_close(h2s->h2c, h2s); ++ h2s_close(h2s); + } + + if (h2s->flags & H2_SF_BLK_SFCTL) { +-- +1.7.10.4 + diff --git a/net/haproxy/patches/0002-MINOR-h2-provide-and-use-h2s_detach-and-h2s_free.patch b/net/haproxy/patches/0002-MINOR-h2-provide-and-use-h2s_detach-and-h2s_free.patch new file mode 100755 index 000000000..17d085983 --- /dev/null +++ b/net/haproxy/patches/0002-MINOR-h2-provide-and-use-h2s_detach-and-h2s_free.patch @@ -0,0 +1,109 @@ +From 518db3f8602fae9caa816ec373855cf0f8c6c45d Mon Sep 17 00:00:00 2001 +From: Willy Tarreau +Date: Thu, 1 Mar 2018 16:27:53 +0100 +Subject: [PATCH] MINOR: h2: provide and use h2s_detach() and h2s_free() + +These ones save us from open-coding the cleanup functions on each and +every error path. The code was updated to use them with no functional +change. + +(cherry picked from commit 0a10de606685ed4e65d4cc84237c6a09dd6fe27c) +Signed-off-by: Willy Tarreau +--- + src/mux_h2.c | 38 +++++++++++++++++++++++--------------- + 1 file changed, 23 insertions(+), 15 deletions(-) + +diff --git a/src/mux_h2.c b/src/mux_h2.c +index 0bb79a4..ff1de8c 100644 +--- a/src/mux_h2.c ++++ b/src/mux_h2.c +@@ -641,6 +641,19 @@ static inline void h2s_close(struct h2s *h2s) + h2s->st = H2_SS_CLOSED; + } + ++/* detaches an H2 stream from its H2C. */ ++static void h2s_detach(struct h2s *h2s) ++{ ++ h2s_close(h2s); ++ eb32_delete(&h2s->by_id); ++} ++ ++/* releases an H2 stream back to the pool, and detaches it from the h2c. */ ++static void h2s_free(struct h2s *h2s) ++{ ++ pool_free(pool_head_h2s, h2s); ++} ++ + /* creates a new stream on the h2c connection and returns it, or NULL in + * case of memory allocation error. 
+ */ +@@ -685,9 +698,8 @@ static struct h2s *h2c_stream_new(struct h2c *h2c, int id) + out_free_cs: + cs_free(cs); + out_close: +- h2c->nb_streams--; +- eb32_delete(&h2s->by_id); +- pool_free(pool_head_h2s, h2s); ++ h2s_detach(h2s); ++ h2s_free(h2s); + h2s = NULL; + out: + return h2s; +@@ -1065,9 +1077,8 @@ static void h2_wake_some_streams(struct h2c *h2c, int last, uint32_t flags) + + if (!h2s->cs) { + /* this stream was already orphaned */ +- h2s_close(h2s); +- eb32_delete(&h2s->by_id); +- pool_free(pool_head_h2s, h2s); ++ h2s_detach(h2s); ++ h2s_free(h2s); + continue; + } + +@@ -2098,9 +2109,8 @@ static int h2_process_mux(struct h2c *h2c) + h2s->cs->flags &= ~CS_FL_DATA_WR_ENA; + else { + /* just sent the last frame for this orphaned stream */ +- h2s_close(h2s); +- eb32_delete(&h2s->by_id); +- pool_free(pool_head_h2s, h2s); ++ h2s_detach(h2s); ++ h2s_free(h2s); + } + } + } +@@ -2141,9 +2151,8 @@ static int h2_process_mux(struct h2c *h2c) + h2s->cs->flags &= ~CS_FL_DATA_WR_ENA; + else { + /* just sent the last frame for this orphaned stream */ +- h2s_close(h2s); +- eb32_delete(&h2s->by_id); +- pool_free(pool_head_h2s, h2s); ++ h2s_detach(h2s); ++ h2s_free(h2s); + } + } + } +@@ -2500,8 +2509,7 @@ static void h2_detach(struct conn_stream *cs) + + if (h2s->by_id.node.leaf_p) { + /* h2s still attached to the h2c */ +- h2s_close(h2s); +- eb32_delete(&h2s->by_id); ++ h2s_detach(h2s); + + /* We don't want to close right now unless we're removing the + * last stream, and either the connection is in error, or it +@@ -2526,7 +2534,7 @@ static void h2_detach(struct conn_stream *cs) + h2c->task->expire = TICK_ETERNITY; + } + } +- pool_free(pool_head_h2s, h2s); ++ h2s_free(h2s); + } + + static void h2_shutr(struct conn_stream *cs, enum cs_shr_mode mode) +-- +1.7.10.4 + diff --git a/net/haproxy/patches/0003-BUG-MAJOR-h2-remove-orphaned-streams-from-the-send-list-before-closing.patch b/net/haproxy/patches/0003-BUG-MAJOR-h2-remove-orphaned-streams-from-the-send-list-before-closing.patch new file mode 100755 index 000000000..6940e6aa5 --- /dev/null +++ b/net/haproxy/patches/0003-BUG-MAJOR-h2-remove-orphaned-streams-from-the-send-list-before-closing.patch @@ -0,0 +1,71 @@ +From cf2ab4d22d977b172cf155e14060cf0f785f8404 Mon Sep 17 00:00:00 2001 +From: Willy Tarreau +Date: Wed, 28 Mar 2018 11:29:04 +0200 +Subject: [PATCH] BUG/MAJOR: h2: remove orphaned streams from the send list + before closing + +Several people reported very strange occasional crashes when using H2. +Every time it appeared that either an h2s or a task was corrupted. The +outcome is that a missing LIST_DEL() when removing an orphaned stream +from the list in h2_wake_some_streams() can cause this stream to +remain present in the send list after it was freed. This may happen +when receiving a GOAWAY frame for example. In the mean time the send +list may be processed due to pending streams, and the just released +stream is still found. If due to a buffer full condition we left the +h2_process_demux() loop before being able to process the pending +stream, the pool entry may be reassigned somewhere else. Either another +h2 connection will get it, or a task, since they are the same size and +are shared. Then upon next pass in h2_process_mux(), the stream is +processed again. Either it crashes here due to modifications, or the +contents are harmless to it and its last changes affect the other object +reasigned to this area (typically a struct task). 
In the case of a +collision with struct task, the LIST_DEL operation performed on h2s +corrupts the task's wait queue's leaf_p pointer, thus all the wait +queue's structure. + +The fix consists in always performing the LIST_DEL in h2s_detach(). +It will also make h2s_stream_new() more robust against a possible +future situation where stream_create_from_cs() could have sent data +before failing. + +Many thanks to all the reporters who provided extremely valuable +information, traces and/or cores, namely Thierry Fournier, Yves Lafon, +Holger Amann, Peter Lindegaard Hansen, and discourse user "slawekc". + +This fix must be backported to 1.8. It is probably better to also +backport the following code cleanups with it as well to limit the +divergence between master and 1.8-stable : + + 00dd078 CLEANUP: h2: rename misleading h2c_stream_close() to h2s_close() + 0a10de6 MINOR: h2: provide and use h2s_detach() and h2s_free() + +(cherry picked from commit 4a333d3d53af786fe09df2f83b4e5db38cfef004) +Signed-off-by: Willy Tarreau +--- + src/mux_h2.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/src/mux_h2.c b/src/mux_h2.c +index ff1de8c..ac5e34f 100644 +--- a/src/mux_h2.c ++++ b/src/mux_h2.c +@@ -645,6 +645,8 @@ static inline void h2s_close(struct h2s *h2s) + static void h2s_detach(struct h2s *h2s) + { + h2s_close(h2s); ++ LIST_DEL(&h2s->list); ++ LIST_INIT(&h2s->list); + eb32_delete(&h2s->by_id); + } + +@@ -2495,6 +2497,7 @@ static void h2_detach(struct conn_stream *cs) + + /* the stream could be in the send list */ + LIST_DEL(&h2s->list); ++ LIST_INIT(&h2s->list); + + if ((h2c->flags & H2_CF_DEM_BLOCK_ANY && h2s->id == h2c->dsi) || + (h2c->flags & H2_CF_MUX_BLOCK_ANY && h2s->id == h2c->msi)) { +-- +1.7.10.4 + diff --git a/net/haproxy/patches/0004-BUG-MEDIUM-h2-threads-never-release-the-task-outside-of-the-task-handler.patch b/net/haproxy/patches/0004-BUG-MEDIUM-h2-threads-never-release-the-task-outside-of-the-task-handler.patch new file mode 100755 index 000000000..abd3e7c0d --- /dev/null +++ b/net/haproxy/patches/0004-BUG-MEDIUM-h2-threads-never-release-the-task-outside-of-the-task-handler.patch @@ -0,0 +1,91 @@ +From 4f2bd42ed3870dbaf143701f0cfbd64966d44252 Mon Sep 17 00:00:00 2001 +From: Willy Tarreau +Date: Thu, 29 Mar 2018 15:22:59 +0200 +Subject: [PATCH] BUG/MEDIUM: h2/threads: never release the task outside of + the task handler + +Currently, h2_release() will release all resources assigned to the h2 +connection, including the timeout task if any. But since the multi-threaded +scheduler, the timeout task could very well be queued in the thread-local +list of running tasks without any way to remove it, so task_delete() will +have no effect and task_free() will cause this undefined object to be +dereferenced. + +In order to prevent this from happening, we never release the task in +h2_release(), instead we wake it up after marking its context NULL so that +the task handler can release the task. + +Future improvements could consist in modifying the scheduler so that a +task_wakeup() has to be done on any task having to be killed, letting +the scheduler take care of it. + +This fix must be backported to 1.8. This bug was apparently not reported +so far. 
+ +(cherry picked from commit 0975f11d554baf30602ce4be3faf0b9741711a80) +Signed-off-by: Willy Tarreau +--- + src/mux_h2.c | 30 +++++++++++++++++------------- + 1 file changed, 17 insertions(+), 13 deletions(-) + +diff --git a/src/mux_h2.c b/src/mux_h2.c +index 3c076d2..92fae06 100644 +--- a/src/mux_h2.c ++++ b/src/mux_h2.c +@@ -484,8 +484,8 @@ static void h2_release(struct connection *conn) + HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); + + if (h2c->task) { +- task_delete(h2c->task); +- task_free(h2c->task); ++ h2c->task->context = NULL; ++ task_wakeup(h2c->task, TASK_WOKEN_OTHER); + h2c->task = NULL; + } + +@@ -2369,9 +2369,18 @@ static struct task *h2_timeout_task(struct task *t) + struct h2c *h2c = t->context; + int expired = tick_is_expired(t->expire, now_ms); + +- if (!expired) ++ if (!expired && h2c) + return t; + ++ task_delete(t); ++ task_free(t); ++ ++ if (!h2c) { ++ /* resources were already deleted */ ++ return NULL; ++ } ++ ++ h2c->task = NULL; + h2c_error(h2c, H2_ERR_NO_ERROR); + h2_wake_some_streams(h2c, 0, 0); + +@@ -2388,17 +2397,12 @@ static struct task *h2_timeout_task(struct task *t) + if (h2c->mbuf->o && !(h2c->flags & H2_CF_GOAWAY_FAILED) && conn_xprt_ready(h2c->conn)) + h2c->conn->xprt->snd_buf(h2c->conn, h2c->mbuf, 0); + +- if (!eb_is_empty(&h2c->streams_by_id)) +- goto wait; +- +- h2_release(h2c->conn); +- return NULL; ++ /* either we can release everything now or it will be done later once ++ * the last stream closes. ++ */ ++ if (eb_is_empty(&h2c->streams_by_id)) ++ h2_release(h2c->conn); + +- wait: +- /* the streams have been notified, we must let them finish and close */ +- h2c->task = NULL; +- task_delete(t); +- task_free(t); + return NULL; + } + +-- +1.7.10.4 + diff --git a/net/haproxy/patches/0005-BUG-MEDIUM-h2-dont-consider-pending-data-on-detach-if-connection-is-in-error.patch b/net/haproxy/patches/0005-BUG-MEDIUM-h2-dont-consider-pending-data-on-detach-if-connection-is-in-error.patch new file mode 100755 index 000000000..24b508b03 --- /dev/null +++ b/net/haproxy/patches/0005-BUG-MEDIUM-h2-dont-consider-pending-data-on-detach-if-connection-is-in-error.patch @@ -0,0 +1,39 @@ +From 58cef63f20cc40248cd1cd113571cae588943d06 Mon Sep 17 00:00:00 2001 +From: Willy Tarreau +Date: Thu, 29 Mar 2018 15:41:32 +0200 +Subject: [PATCH] BUG/MEDIUM: h2: don't consider pending data on detach if + connection is in error + +Interrupting an h2load test shows that some connections remain active till +the client timeout. This is due to the fact that h2_detach() immediately +returns if the h2s flags indicate that the h2s is still waiting for some +buffer room in the output mux (possibly to emit a response or to send some +window updates). If the connection is broken, these data will never leave +and must not prevent the stream from being terminated nor the connection +from being released. + +This fix must be backported to 1.8. + +(cherry picked from commit 3041fcc2fde3f3f33418c9f579b657d993b0006d) +Signed-off-by: Willy Tarreau +--- + src/mux_h2.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/src/mux_h2.c b/src/mux_h2.c +index 92fae06..4d30f91 100644 +--- a/src/mux_h2.c ++++ b/src/mux_h2.c +@@ -2487,7 +2487,8 @@ static void h2_detach(struct conn_stream *cs) + /* this stream may be blocked waiting for some data to leave (possibly + * an ES or RST frame), so orphan it in this case. 
+ */ +- if (h2s->flags & (H2_SF_BLK_MBUSY | H2_SF_BLK_MROOM | H2_SF_BLK_MFCTL)) ++ if (!(cs->conn->flags & CO_FL_ERROR) && ++ (h2s->flags & (H2_SF_BLK_MBUSY | H2_SF_BLK_MROOM | H2_SF_BLK_MFCTL))) + return; + + if ((h2c->flags & H2_CF_DEM_BLOCK_ANY && h2s->id == h2c->dsi) || +-- +1.7.10.4 + From c8095562dba4a891d0e5f20e1f1fe762772c8ab2 Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Thu, 5 Apr 2018 15:56:37 +0200 Subject: [PATCH 05/13] haproxy: Update HAProxy to v1.8.6 - Update haproxy download URL and hash - Remove all already included patches Signed-off-by: Christian Lachner --- net/haproxy/Makefile | 6 +- ...eading-h2c_stream_close-to-h2s_close.patch | 167 ------------------ ...vide-and-use-h2s_detach-and-h2s_free.patch | 109 ------------ ...ms-from-the-send-list-before-closing.patch | 71 -------- ...the-task-outside-of-the-task-handler.patch | 91 ---------- ...-on-detach-if-connection-is-in-error.patch | 39 ---- 6 files changed, 3 insertions(+), 480 deletions(-) delete mode 100755 net/haproxy/patches/0001-CLEANUP-h2-rename-misleading-h2c_stream_close-to-h2s_close.patch delete mode 100755 net/haproxy/patches/0002-MINOR-h2-provide-and-use-h2s_detach-and-h2s_free.patch delete mode 100755 net/haproxy/patches/0003-BUG-MAJOR-h2-remove-orphaned-streams-from-the-send-list-before-closing.patch delete mode 100755 net/haproxy/patches/0004-BUG-MEDIUM-h2-threads-never-release-the-task-outside-of-the-task-handler.patch delete mode 100755 net/haproxy/patches/0005-BUG-MEDIUM-h2-dont-consider-pending-data-on-detach-if-connection-is-in-error.patch diff --git a/net/haproxy/Makefile b/net/haproxy/Makefile index 5ee0f9eb6..b50321488 100644 --- a/net/haproxy/Makefile +++ b/net/haproxy/Makefile @@ -9,12 +9,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=haproxy -PKG_VERSION:=1.8.5 -PKG_RELEASE:=02 +PKG_VERSION:=1.8.6 +PKG_RELEASE:=01 PKG_SOURCE:=haproxy-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://www.haproxy.org/download/1.8/src/ -PKG_HASH:=1c22083fa85332d5ab1c9aa8a7ec47a28d87ad9d802558808f9921d938ba20c9 +PKG_HASH:=05aee8d3894679b88f32dc19b526ea9b09af98d7bd9e41bcc1e589c9c9b2c780 PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION) PKG_LICENSE:=GPL-2.0 diff --git a/net/haproxy/patches/0001-CLEANUP-h2-rename-misleading-h2c_stream_close-to-h2s_close.patch b/net/haproxy/patches/0001-CLEANUP-h2-rename-misleading-h2c_stream_close-to-h2s_close.patch deleted file mode 100755 index 87e3e49c4..000000000 --- a/net/haproxy/patches/0001-CLEANUP-h2-rename-misleading-h2c_stream_close-to-h2s_close.patch +++ /dev/null @@ -1,167 +0,0 @@ -From 27b2c5ead5cf85626d4169ab46b3246d65033b58 Mon Sep 17 00:00:00 2001 -From: Willy Tarreau -Date: Thu, 1 Mar 2018 16:31:34 +0100 -Subject: [PATCH] CLEANUP: h2: rename misleading h2c_stream_close() to - h2s_close() - -This function takes an h2c and an h2s but it never uses the h2c, which -is a bit confusing at some places in the code. Let's make it clear that -it only operates on the h2s instead by renaming it and removing the -unused h2c argument. 
- -(cherry picked from commit 00dd07895a6ee856c811c6d60a8e3d4c7d973c63) -Signed-off-by: Willy Tarreau ---- - src/mux_h2.c | 37 ++++++++++++++++++------------------- - 1 file changed, 18 insertions(+), 19 deletions(-) - -diff --git a/src/mux_h2.c b/src/mux_h2.c -index bb0a3e3..0bb79a4 100644 ---- a/src/mux_h2.c -+++ b/src/mux_h2.c -@@ -630,12 +630,11 @@ static inline __maybe_unused int h2_get_frame_hdr(struct buffer *b, struct h2_fh - return ret; - } - --/* marks stream as CLOSED for connection and decrement the number -- * of active streams for this connection if the stream was not yet closed. -- * Please use this exclusively before closing a stream to ensure stream count -- * is well maintained. -+/* marks stream as CLOSED and decrement the number of active streams for -+ * its connection if the stream was not yet closed. Please use this exclusively -+ * before closing a stream to ensure stream count is well maintained. - */ --static inline void h2c_stream_close(struct h2c *h2c, struct h2s *h2s) -+static inline void h2s_close(struct h2s *h2s) - { - if (h2s->st != H2_SS_CLOSED) - h2s->h2c->nb_streams--; -@@ -924,7 +923,7 @@ static int h2s_send_rst_stream(struct h2c *h2c, struct h2s *h2s) - - ignore: - h2s->flags |= H2_SF_RST_SENT; -- h2c_stream_close(h2c, h2s); -+ h2s_close(h2s); - return ret; - } - -@@ -988,7 +987,7 @@ static int h2c_send_rst_stream(struct h2c *h2c, struct h2s *h2s) - ignore: - if (h2s->st > H2_SS_IDLE && h2s->st < H2_SS_CLOSED) { - h2s->flags |= H2_SF_RST_SENT; -- h2c_stream_close(h2c, h2s); -+ h2s_close(h2s); - } - - return ret; -@@ -1066,7 +1065,7 @@ static void h2_wake_some_streams(struct h2c *h2c, int last, uint32_t flags) - - if (!h2s->cs) { - /* this stream was already orphaned */ -- h2c_stream_close(h2c, h2s); -+ h2s_close(h2s); - eb32_delete(&h2s->by_id); - pool_free(pool_head_h2s, h2s); - continue; -@@ -1084,7 +1083,7 @@ static void h2_wake_some_streams(struct h2c *h2c, int last, uint32_t flags) - else if (flags & CS_FL_EOS && h2s->st == H2_SS_OPEN) - h2s->st = H2_SS_HREM; - else if (flags & CS_FL_EOS && h2s->st == H2_SS_HLOC) -- h2c_stream_close(h2c, h2s); -+ h2s_close(h2s); - } - } - -@@ -1551,7 +1550,7 @@ static int h2c_handle_rst_stream(struct h2c *h2c, struct h2s *h2s) - return 1; - - h2s->errcode = h2_get_n32(h2c->dbuf, 0); -- h2c_stream_close(h2c, h2s); -+ h2s_close(h2s); - - if (h2s->cs) { - h2s->cs->flags |= CS_FL_EOS | CS_FL_ERROR; -@@ -2099,7 +2098,7 @@ static int h2_process_mux(struct h2c *h2c) - h2s->cs->flags &= ~CS_FL_DATA_WR_ENA; - else { - /* just sent the last frame for this orphaned stream */ -- h2c_stream_close(h2c, h2s); -+ h2s_close(h2s); - eb32_delete(&h2s->by_id); - pool_free(pool_head_h2s, h2s); - } -@@ -2142,7 +2141,7 @@ static int h2_process_mux(struct h2c *h2c) - h2s->cs->flags &= ~CS_FL_DATA_WR_ENA; - else { - /* just sent the last frame for this orphaned stream */ -- h2c_stream_close(h2c, h2s); -+ h2s_close(h2s); - eb32_delete(&h2s->by_id); - pool_free(pool_head_h2s, h2s); - } -@@ -2501,7 +2500,7 @@ static void h2_detach(struct conn_stream *cs) - - if (h2s->by_id.node.leaf_p) { - /* h2s still attached to the h2c */ -- h2c_stream_close(h2c, h2s); -+ h2s_close(h2s); - eb32_delete(&h2s->by_id); - - /* We don't want to close right now unless we're removing the -@@ -2557,7 +2556,7 @@ static void h2_shutr(struct conn_stream *cs, enum cs_shr_mode mode) - if (h2s->h2c->mbuf->o && !(cs->conn->flags & CO_FL_XPRT_WR_ENA)) - conn_xprt_want_send(cs->conn); - -- h2c_stream_close(h2s->h2c, h2s); -+ h2s_close(h2s); - } - - static void 
h2_shutw(struct conn_stream *cs, enum cs_shw_mode mode) -@@ -2575,7 +2574,7 @@ static void h2_shutw(struct conn_stream *cs, enum cs_shw_mode mode) - return; - - if (h2s->st == H2_SS_HREM) -- h2c_stream_close(h2s->h2c, h2s); -+ h2s_close(h2s); - else - h2s->st = H2_SS_HLOC; - } else { -@@ -2593,7 +2592,7 @@ static void h2_shutw(struct conn_stream *cs, enum cs_shw_mode mode) - h2c_send_goaway_error(h2s->h2c, h2s) <= 0) - return; - -- h2c_stream_close(h2s->h2c, h2s); -+ h2s_close(h2s); - } - - if (h2s->h2c->mbuf->o && !(cs->conn->flags & CO_FL_XPRT_WR_ENA)) -@@ -3049,7 +3048,7 @@ static int h2s_frt_make_resp_headers(struct h2s *h2s, struct buffer *buf) - if (h2s->st == H2_SS_OPEN) - h2s->st = H2_SS_HLOC; - else -- h2c_stream_close(h2c, h2s); -+ h2s_close(h2s); - } - else if (h1m->status >= 100 && h1m->status < 200) { - /* we'll let the caller check if it has more headers to send */ -@@ -3291,7 +3290,7 @@ static int h2s_frt_make_resp_data(struct h2s *h2s, struct buffer *buf) - if (h2s->st == H2_SS_OPEN) - h2s->st = H2_SS_HLOC; - else -- h2c_stream_close(h2c, h2s); -+ h2s_close(h2s); - - if (!(h1m->flags & H1_MF_CHNK)) { - // trim any possibly pending data (eg: inconsistent content-length) -@@ -3364,7 +3363,7 @@ static int h2_snd_buf(struct conn_stream *cs, struct buffer *buf, int flags) - if (h2s->st == H2_SS_ERROR || h2s->flags & H2_SF_RST_RCVD) { - cs->flags |= CS_FL_ERROR; - if (h2s_send_rst_stream(h2s->h2c, h2s) > 0) -- h2c_stream_close(h2s->h2c, h2s); -+ h2s_close(h2s); - } - - if (h2s->flags & H2_SF_BLK_SFCTL) { --- -1.7.10.4 - diff --git a/net/haproxy/patches/0002-MINOR-h2-provide-and-use-h2s_detach-and-h2s_free.patch b/net/haproxy/patches/0002-MINOR-h2-provide-and-use-h2s_detach-and-h2s_free.patch deleted file mode 100755 index 17d085983..000000000 --- a/net/haproxy/patches/0002-MINOR-h2-provide-and-use-h2s_detach-and-h2s_free.patch +++ /dev/null @@ -1,109 +0,0 @@ -From 518db3f8602fae9caa816ec373855cf0f8c6c45d Mon Sep 17 00:00:00 2001 -From: Willy Tarreau -Date: Thu, 1 Mar 2018 16:27:53 +0100 -Subject: [PATCH] MINOR: h2: provide and use h2s_detach() and h2s_free() - -These ones save us from open-coding the cleanup functions on each and -every error path. The code was updated to use them with no functional -change. - -(cherry picked from commit 0a10de606685ed4e65d4cc84237c6a09dd6fe27c) -Signed-off-by: Willy Tarreau ---- - src/mux_h2.c | 38 +++++++++++++++++++++++--------------- - 1 file changed, 23 insertions(+), 15 deletions(-) - -diff --git a/src/mux_h2.c b/src/mux_h2.c -index 0bb79a4..ff1de8c 100644 ---- a/src/mux_h2.c -+++ b/src/mux_h2.c -@@ -641,6 +641,19 @@ static inline void h2s_close(struct h2s *h2s) - h2s->st = H2_SS_CLOSED; - } - -+/* detaches an H2 stream from its H2C. */ -+static void h2s_detach(struct h2s *h2s) -+{ -+ h2s_close(h2s); -+ eb32_delete(&h2s->by_id); -+} -+ -+/* releases an H2 stream back to the pool, and detaches it from the h2c. */ -+static void h2s_free(struct h2s *h2s) -+{ -+ pool_free(pool_head_h2s, h2s); -+} -+ - /* creates a new stream on the h2c connection and returns it, or NULL in - * case of memory allocation error. 
- */ -@@ -685,9 +698,8 @@ static struct h2s *h2c_stream_new(struct h2c *h2c, int id) - out_free_cs: - cs_free(cs); - out_close: -- h2c->nb_streams--; -- eb32_delete(&h2s->by_id); -- pool_free(pool_head_h2s, h2s); -+ h2s_detach(h2s); -+ h2s_free(h2s); - h2s = NULL; - out: - return h2s; -@@ -1065,9 +1077,8 @@ static void h2_wake_some_streams(struct h2c *h2c, int last, uint32_t flags) - - if (!h2s->cs) { - /* this stream was already orphaned */ -- h2s_close(h2s); -- eb32_delete(&h2s->by_id); -- pool_free(pool_head_h2s, h2s); -+ h2s_detach(h2s); -+ h2s_free(h2s); - continue; - } - -@@ -2098,9 +2109,8 @@ static int h2_process_mux(struct h2c *h2c) - h2s->cs->flags &= ~CS_FL_DATA_WR_ENA; - else { - /* just sent the last frame for this orphaned stream */ -- h2s_close(h2s); -- eb32_delete(&h2s->by_id); -- pool_free(pool_head_h2s, h2s); -+ h2s_detach(h2s); -+ h2s_free(h2s); - } - } - } -@@ -2141,9 +2151,8 @@ static int h2_process_mux(struct h2c *h2c) - h2s->cs->flags &= ~CS_FL_DATA_WR_ENA; - else { - /* just sent the last frame for this orphaned stream */ -- h2s_close(h2s); -- eb32_delete(&h2s->by_id); -- pool_free(pool_head_h2s, h2s); -+ h2s_detach(h2s); -+ h2s_free(h2s); - } - } - } -@@ -2500,8 +2509,7 @@ static void h2_detach(struct conn_stream *cs) - - if (h2s->by_id.node.leaf_p) { - /* h2s still attached to the h2c */ -- h2s_close(h2s); -- eb32_delete(&h2s->by_id); -+ h2s_detach(h2s); - - /* We don't want to close right now unless we're removing the - * last stream, and either the connection is in error, or it -@@ -2526,7 +2534,7 @@ static void h2_detach(struct conn_stream *cs) - h2c->task->expire = TICK_ETERNITY; - } - } -- pool_free(pool_head_h2s, h2s); -+ h2s_free(h2s); - } - - static void h2_shutr(struct conn_stream *cs, enum cs_shr_mode mode) --- -1.7.10.4 - diff --git a/net/haproxy/patches/0003-BUG-MAJOR-h2-remove-orphaned-streams-from-the-send-list-before-closing.patch b/net/haproxy/patches/0003-BUG-MAJOR-h2-remove-orphaned-streams-from-the-send-list-before-closing.patch deleted file mode 100755 index 6940e6aa5..000000000 --- a/net/haproxy/patches/0003-BUG-MAJOR-h2-remove-orphaned-streams-from-the-send-list-before-closing.patch +++ /dev/null @@ -1,71 +0,0 @@ -From cf2ab4d22d977b172cf155e14060cf0f785f8404 Mon Sep 17 00:00:00 2001 -From: Willy Tarreau -Date: Wed, 28 Mar 2018 11:29:04 +0200 -Subject: [PATCH] BUG/MAJOR: h2: remove orphaned streams from the send list - before closing - -Several people reported very strange occasional crashes when using H2. -Every time it appeared that either an h2s or a task was corrupted. The -outcome is that a missing LIST_DEL() when removing an orphaned stream -from the list in h2_wake_some_streams() can cause this stream to -remain present in the send list after it was freed. This may happen -when receiving a GOAWAY frame for example. In the mean time the send -list may be processed due to pending streams, and the just released -stream is still found. If due to a buffer full condition we left the -h2_process_demux() loop before being able to process the pending -stream, the pool entry may be reassigned somewhere else. Either another -h2 connection will get it, or a task, since they are the same size and -are shared. Then upon next pass in h2_process_mux(), the stream is -processed again. Either it crashes here due to modifications, or the -contents are harmless to it and its last changes affect the other object -reasigned to this area (typically a struct task). 
In the case of a -collision with struct task, the LIST_DEL operation performed on h2s -corrupts the task's wait queue's leaf_p pointer, thus all the wait -queue's structure. - -The fix consists in always performing the LIST_DEL in h2s_detach(). -It will also make h2s_stream_new() more robust against a possible -future situation where stream_create_from_cs() could have sent data -before failing. - -Many thanks to all the reporters who provided extremely valuable -information, traces and/or cores, namely Thierry Fournier, Yves Lafon, -Holger Amann, Peter Lindegaard Hansen, and discourse user "slawekc". - -This fix must be backported to 1.8. It is probably better to also -backport the following code cleanups with it as well to limit the -divergence between master and 1.8-stable : - - 00dd078 CLEANUP: h2: rename misleading h2c_stream_close() to h2s_close() - 0a10de6 MINOR: h2: provide and use h2s_detach() and h2s_free() - -(cherry picked from commit 4a333d3d53af786fe09df2f83b4e5db38cfef004) -Signed-off-by: Willy Tarreau ---- - src/mux_h2.c | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/src/mux_h2.c b/src/mux_h2.c -index ff1de8c..ac5e34f 100644 ---- a/src/mux_h2.c -+++ b/src/mux_h2.c -@@ -645,6 +645,8 @@ static inline void h2s_close(struct h2s *h2s) - static void h2s_detach(struct h2s *h2s) - { - h2s_close(h2s); -+ LIST_DEL(&h2s->list); -+ LIST_INIT(&h2s->list); - eb32_delete(&h2s->by_id); - } - -@@ -2495,6 +2497,7 @@ static void h2_detach(struct conn_stream *cs) - - /* the stream could be in the send list */ - LIST_DEL(&h2s->list); -+ LIST_INIT(&h2s->list); - - if ((h2c->flags & H2_CF_DEM_BLOCK_ANY && h2s->id == h2c->dsi) || - (h2c->flags & H2_CF_MUX_BLOCK_ANY && h2s->id == h2c->msi)) { --- -1.7.10.4 - diff --git a/net/haproxy/patches/0004-BUG-MEDIUM-h2-threads-never-release-the-task-outside-of-the-task-handler.patch b/net/haproxy/patches/0004-BUG-MEDIUM-h2-threads-never-release-the-task-outside-of-the-task-handler.patch deleted file mode 100755 index abd3e7c0d..000000000 --- a/net/haproxy/patches/0004-BUG-MEDIUM-h2-threads-never-release-the-task-outside-of-the-task-handler.patch +++ /dev/null @@ -1,91 +0,0 @@ -From 4f2bd42ed3870dbaf143701f0cfbd64966d44252 Mon Sep 17 00:00:00 2001 -From: Willy Tarreau -Date: Thu, 29 Mar 2018 15:22:59 +0200 -Subject: [PATCH] BUG/MEDIUM: h2/threads: never release the task outside of - the task handler - -Currently, h2_release() will release all resources assigned to the h2 -connection, including the timeout task if any. But since the multi-threaded -scheduler, the timeout task could very well be queued in the thread-local -list of running tasks without any way to remove it, so task_delete() will -have no effect and task_free() will cause this undefined object to be -dereferenced. - -In order to prevent this from happening, we never release the task in -h2_release(), instead we wake it up after marking its context NULL so that -the task handler can release the task. - -Future improvements could consist in modifying the scheduler so that a -task_wakeup() has to be done on any task having to be killed, letting -the scheduler take care of it. - -This fix must be backported to 1.8. This bug was apparently not reported -so far. 
- -(cherry picked from commit 0975f11d554baf30602ce4be3faf0b9741711a80) -Signed-off-by: Willy Tarreau ---- - src/mux_h2.c | 30 +++++++++++++++++------------- - 1 file changed, 17 insertions(+), 13 deletions(-) - -diff --git a/src/mux_h2.c b/src/mux_h2.c -index 3c076d2..92fae06 100644 ---- a/src/mux_h2.c -+++ b/src/mux_h2.c -@@ -484,8 +484,8 @@ static void h2_release(struct connection *conn) - HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); - - if (h2c->task) { -- task_delete(h2c->task); -- task_free(h2c->task); -+ h2c->task->context = NULL; -+ task_wakeup(h2c->task, TASK_WOKEN_OTHER); - h2c->task = NULL; - } - -@@ -2369,9 +2369,18 @@ static struct task *h2_timeout_task(struct task *t) - struct h2c *h2c = t->context; - int expired = tick_is_expired(t->expire, now_ms); - -- if (!expired) -+ if (!expired && h2c) - return t; - -+ task_delete(t); -+ task_free(t); -+ -+ if (!h2c) { -+ /* resources were already deleted */ -+ return NULL; -+ } -+ -+ h2c->task = NULL; - h2c_error(h2c, H2_ERR_NO_ERROR); - h2_wake_some_streams(h2c, 0, 0); - -@@ -2388,17 +2397,12 @@ static struct task *h2_timeout_task(struct task *t) - if (h2c->mbuf->o && !(h2c->flags & H2_CF_GOAWAY_FAILED) && conn_xprt_ready(h2c->conn)) - h2c->conn->xprt->snd_buf(h2c->conn, h2c->mbuf, 0); - -- if (!eb_is_empty(&h2c->streams_by_id)) -- goto wait; -- -- h2_release(h2c->conn); -- return NULL; -+ /* either we can release everything now or it will be done later once -+ * the last stream closes. -+ */ -+ if (eb_is_empty(&h2c->streams_by_id)) -+ h2_release(h2c->conn); - -- wait: -- /* the streams have been notified, we must let them finish and close */ -- h2c->task = NULL; -- task_delete(t); -- task_free(t); - return NULL; - } - --- -1.7.10.4 - diff --git a/net/haproxy/patches/0005-BUG-MEDIUM-h2-dont-consider-pending-data-on-detach-if-connection-is-in-error.patch b/net/haproxy/patches/0005-BUG-MEDIUM-h2-dont-consider-pending-data-on-detach-if-connection-is-in-error.patch deleted file mode 100755 index 24b508b03..000000000 --- a/net/haproxy/patches/0005-BUG-MEDIUM-h2-dont-consider-pending-data-on-detach-if-connection-is-in-error.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 58cef63f20cc40248cd1cd113571cae588943d06 Mon Sep 17 00:00:00 2001 -From: Willy Tarreau -Date: Thu, 29 Mar 2018 15:41:32 +0200 -Subject: [PATCH] BUG/MEDIUM: h2: don't consider pending data on detach if - connection is in error - -Interrupting an h2load test shows that some connections remain active till -the client timeout. This is due to the fact that h2_detach() immediately -returns if the h2s flags indicate that the h2s is still waiting for some -buffer room in the output mux (possibly to emit a response or to send some -window updates). If the connection is broken, these data will never leave -and must not prevent the stream from being terminated nor the connection -from being released. - -This fix must be backported to 1.8. - -(cherry picked from commit 3041fcc2fde3f3f33418c9f579b657d993b0006d) -Signed-off-by: Willy Tarreau ---- - src/mux_h2.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/src/mux_h2.c b/src/mux_h2.c -index 92fae06..4d30f91 100644 ---- a/src/mux_h2.c -+++ b/src/mux_h2.c -@@ -2487,7 +2487,8 @@ static void h2_detach(struct conn_stream *cs) - /* this stream may be blocked waiting for some data to leave (possibly - * an ES or RST frame), so orphan it in this case. 
- */ -- if (h2s->flags & (H2_SF_BLK_MBUSY | H2_SF_BLK_MROOM | H2_SF_BLK_MFCTL)) -+ if (!(cs->conn->flags & CO_FL_ERROR) && -+ (h2s->flags & (H2_SF_BLK_MBUSY | H2_SF_BLK_MROOM | H2_SF_BLK_MFCTL))) - return; - - if ((h2c->flags & H2_CF_DEM_BLOCK_ANY && h2s->id == h2c->dsi) || --- -1.7.10.4 - From 157bb0f89d92a1695178cc1db3f720983ad06798 Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Sat, 7 Apr 2018 18:08:33 +0200 Subject: [PATCH 06/13] haproxy: Update HAProxy to v1.8.7 - Update haproxy download URL and hash Signed-off-by: Christian Lachner --- net/haproxy/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/haproxy/Makefile b/net/haproxy/Makefile index b50321488..531c0419b 100644 --- a/net/haproxy/Makefile +++ b/net/haproxy/Makefile @@ -9,12 +9,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=haproxy -PKG_VERSION:=1.8.6 +PKG_VERSION:=1.8.7 PKG_RELEASE:=01 PKG_SOURCE:=haproxy-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://www.haproxy.org/download/1.8/src/ -PKG_HASH:=05aee8d3894679b88f32dc19b526ea9b09af98d7bd9e41bcc1e589c9c9b2c780 +PKG_HASH:=0584a52c9a9095470be8d4216b31e7a312873752d5eb66be4eb3ce51b2875317 PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION) PKG_LICENSE:=GPL-2.0 From a3eff197e0e62b2b3f3c650b54336293a0dd4f8c Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Fri, 20 Apr 2018 14:20:24 +0200 Subject: [PATCH 07/13] haproxy: Update HAProxy to v1.8.8 - Update haproxy download URL and hash Signed-off-by: Christian Lachner --- net/haproxy/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/haproxy/Makefile b/net/haproxy/Makefile index 531c0419b..6f07eaf20 100644 --- a/net/haproxy/Makefile +++ b/net/haproxy/Makefile @@ -9,12 +9,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=haproxy -PKG_VERSION:=1.8.7 +PKG_VERSION:=1.8.8 PKG_RELEASE:=01 PKG_SOURCE:=haproxy-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://www.haproxy.org/download/1.8/src/ -PKG_HASH:=0584a52c9a9095470be8d4216b31e7a312873752d5eb66be4eb3ce51b2875317 +PKG_HASH:=bcc05ab824bd2f89b8b21ac05459c0a0a0e02247b57ffe441d52cfe771daea92 PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION) PKG_LICENSE:=GPL-2.0 From f609448491411a3e9bc14b20db1a8fa258f935ba Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Sun, 29 Apr 2018 10:00:22 +0200 Subject: [PATCH 08/13] haproxy: Add a script for package maintainers to simplify upstream patch collection Manually fetching patches is cumbersome so I created a simple bash-script which uses Git-mechanisms to collect all patches inside a branch from a specific TAG to the current HEAD revision. 
Signed-off-by: Christian Lachner --- net/haproxy/get-latest-patches.sh | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100755 net/haproxy/get-latest-patches.sh diff --git a/net/haproxy/get-latest-patches.sh b/net/haproxy/get-latest-patches.sh new file mode 100755 index 000000000..b74107f21 --- /dev/null +++ b/net/haproxy/get-latest-patches.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +CLONEURL=http://git.haproxy.org/git/haproxy-1.8.git +BASE_TAG=v1.8.8 +TMP_REPODIR=tmprepo +PATCHESDIR=patches + +if test -d "${TMP_REPODIR}"; then rm -rf "${TMP_REPODIR}"; fi + +git clone "${CLONEURL}" "${TMP_REPODIR}" + +printf "Cleaning patches\n" +find ${PATCHESDIR} -type f -name "*.patch" -exec rm -f "{}" \; + +i=0 +for cid in $(git -C "${TMP_REPODIR}" rev-list ${BASE_TAG}..HEAD | tac); do + filename="$(printf "%04d" $i)-$(git -C "${TMP_REPODIR}" log --format=%s -n 1 $cid | sed -e"s/[()']//g" -e's/[^_a-zA-Z0-9+-]\+/-/g' -e's/-$//').patch" + printf "Creating ${filename}\n" + git -C "${TMP_REPODIR}" show $cid > "${PATCHESDIR}/$filename" + git add "${PATCHESDIR}/$filename" + let i++ +done + +rm -rf "${TMP_REPODIR}" + +printf "finished\n" + From c866db09111b700e8fc93d86347b19a27e138010 Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Sun, 29 Apr 2018 10:04:49 +0200 Subject: [PATCH 09/13] haproxy: Update all patches for HAProxy v1.8.8 - Add new patches (see https://www.haproxy.org/bugs/bugs-1.8.8.html) - Raise patch-level to 02 Signed-off-by: Christian Lachner --- net/haproxy/Makefile | 2 +- ...issing-HA_SPIN_INIT-in-pat_ref_newid.patch | 26 +++ ...-trying-to-read-from-a-closed-socket.patch | 87 ++++++++++ ...idle-Ti-is-not-set-for-some-requests.patch | 50 ++++++ ...gmentation-fault-if-a-Lua-task-exits.patch | 48 +++++ ...nce-of-CONNECT-and-or-content-length.patch | 147 ++++++++++++++++ ...-support-for-chunked-encoded-uploads.patch | 164 ++++++++++++++++++ ...s-tasks-sticky-to-the-current-thread.patch | 36 ++++ 8 files changed, 559 insertions(+), 1 deletion(-) create mode 100644 net/haproxy/patches/0000-BUG-MINOR-pattern-Add-a-missing-HA_SPIN_INIT-in-pat_ref_newid.patch create mode 100644 net/haproxy/patches/0001-BUG-MAJOR-channel-Fix-crash-when-trying-to-read-from-a-closed-socket.patch create mode 100644 net/haproxy/patches/0002-BUG-MINOR-log-t_idle-Ti-is-not-set-for-some-requests.patch create mode 100644 net/haproxy/patches/0003-BUG-MEDIUM-lua-Fix-segmentation-fault-if-a-Lua-task-exits.patch create mode 100644 net/haproxy/patches/0004-MINOR-h2-detect-presence-of-CONNECT-and-or-content-length.patch create mode 100644 net/haproxy/patches/0005-BUG-MEDIUM-h2-implement-missing-support-for-chunked-encoded-uploads.patch create mode 100644 net/haproxy/patches/0006-BUG-MINOR-lua-threads-Make-luas-tasks-sticky-to-the-current-thread.patch diff --git a/net/haproxy/Makefile b/net/haproxy/Makefile index 6f07eaf20..44544c97f 100644 --- a/net/haproxy/Makefile +++ b/net/haproxy/Makefile @@ -10,7 +10,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=haproxy PKG_VERSION:=1.8.8 -PKG_RELEASE:=01 +PKG_RELEASE:=02 PKG_SOURCE:=haproxy-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://www.haproxy.org/download/1.8/src/ diff --git a/net/haproxy/patches/0000-BUG-MINOR-pattern-Add-a-missing-HA_SPIN_INIT-in-pat_ref_newid.patch b/net/haproxy/patches/0000-BUG-MINOR-pattern-Add-a-missing-HA_SPIN_INIT-in-pat_ref_newid.patch new file mode 100644 index 000000000..eb3a18a10 --- /dev/null +++ b/net/haproxy/patches/0000-BUG-MINOR-pattern-Add-a-missing-HA_SPIN_INIT-in-pat_ref_newid.patch @@ -0,0 +1,26 @@ +commit 
6c9efc8219e35f4eb17e94b364f4c371cfb56cca +Author: Aurélien Nephtali +Date: Thu Apr 19 16:56:07 2018 +0200 + + BUG/MINOR: pattern: Add a missing HA_SPIN_INIT() in pat_ref_newid() + + pat_ref_newid() is lacking a spinlock init. It was probably forgotten + in b5997f740b ("MAJOR: threads/map: Make acls/maps thread safe"). + + Signed-off-by: Aurélien Nephtali + (cherry picked from commit 564d15a71ecb3ae3372767866335cfbc068c4b48) + Signed-off-by: Christopher Faulet + +diff --git a/src/pattern.c b/src/pattern.c +index fe672f12..2eb82650 100644 +--- a/src/pattern.c ++++ b/src/pattern.c +@@ -1906,7 +1906,7 @@ struct pat_ref *pat_ref_newid(int unique_id, const char *display, unsigned int f + ref->unique_id = unique_id; + LIST_INIT(&ref->head); + LIST_INIT(&ref->pat); +- ++ HA_SPIN_INIT(&ref->lock); + LIST_ADDQ(&pattern_reference, &ref->list); + + return ref; diff --git a/net/haproxy/patches/0001-BUG-MAJOR-channel-Fix-crash-when-trying-to-read-from-a-closed-socket.patch b/net/haproxy/patches/0001-BUG-MAJOR-channel-Fix-crash-when-trying-to-read-from-a-closed-socket.patch new file mode 100644 index 000000000..606c3bf09 --- /dev/null +++ b/net/haproxy/patches/0001-BUG-MAJOR-channel-Fix-crash-when-trying-to-read-from-a-closed-socket.patch @@ -0,0 +1,87 @@ +commit e0f6d4a4e8696140d1fcff812fb287d534d702e9 +Author: Tim Duesterhus +Date: Tue Apr 24 19:20:43 2018 +0200 + + BUG/MAJOR: channel: Fix crash when trying to read from a closed socket + + When haproxy is compiled using GCC <= 3.x or >= 5.x the `unlikely` + macro performs a comparison with zero: `(x) != 0`, thus returning + either 0 or 1. + + In `int co_getline_nc()` this macro was accidentally applied to + the variable `retcode` itself, instead of the result of the + comparison `retcode <= 0`. As a result any negative `retcode` + is converted to `1` for purposes of the comparison. + Thus never taking the branch (and exiting the function) for + negative values. + + This in turn leads to reads of uninitialized memory in the for-loop + below: + + ==12141== Conditional jump or move depends on uninitialised value(s) + ==12141== at 0x4EB6B4: co_getline_nc (channel.c:346) + ==12141== by 0x421CA4: hlua_socket_receive_yield (hlua.c:1713) + ==12141== by 0x421F6F: hlua_socket_receive (hlua.c:1896) + ==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x529B497: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x529711A: lua_pcallk (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x52ABDF0: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x529A9F1: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x529B523: lua_resume (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== + ==12141== Use of uninitialised value of size 8 + ==12141== at 0x4EB6B9: co_getline_nc (channel.c:346) + ==12141== by 0x421CA4: hlua_socket_receive_yield (hlua.c:1713) + ==12141== by 0x421F6F: hlua_socket_receive (hlua.c:1896) + ==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x529B497: ??? 
(in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x529711A: lua_pcallk (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x52ABDF0: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x529A9F1: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x529B523: lua_resume (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== + ==12141== Invalid read of size 1 + ==12141== at 0x4EB6B9: co_getline_nc (channel.c:346) + ==12141== by 0x421CA4: hlua_socket_receive_yield (hlua.c:1713) + ==12141== by 0x421F6F: hlua_socket_receive (hlua.c:1896) + ==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x529B497: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x529711A: lua_pcallk (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x52ABDF0: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x529A9F1: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== by 0x529B523: lua_resume (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==12141== Address 0x8637171e928bb500 is not stack'd, malloc'd or (recently) free'd + + Fix this bug by correctly applying the `unlikely` macro to the result of the comparison. + + This bug exists as of commit ca16b038132444dea06e6d83953034128a812bce + which is the first commit adding this function. + + v1.6-dev1 is the first tag containing this commit, the fix should + be backported to haproxy 1.6 and newer. + + (cherry picked from commit 45be38c9c7ba2b20806f2b887876db4fb5b9457c) + Signed-off-by: Christopher Faulet + +diff --git a/src/channel.c b/src/channel.c +index bd5c4de0..3770502c 100644 +--- a/src/channel.c ++++ b/src/channel.c +@@ -340,7 +340,7 @@ int co_getline_nc(const struct channel *chn, + int l; + + retcode = co_getblk_nc(chn, blk1, len1, blk2, len2); +- if (unlikely(retcode) <= 0) ++ if (unlikely(retcode <= 0)) + return retcode; + + for (l = 0; l < *len1 && (*blk1)[l] != '\n'; l++); diff --git a/net/haproxy/patches/0002-BUG-MINOR-log-t_idle-Ti-is-not-set-for-some-requests.patch b/net/haproxy/patches/0002-BUG-MINOR-log-t_idle-Ti-is-not-set-for-some-requests.patch new file mode 100644 index 000000000..9fffb9597 --- /dev/null +++ b/net/haproxy/patches/0002-BUG-MINOR-log-t_idle-Ti-is-not-set-for-some-requests.patch @@ -0,0 +1,50 @@ +commit 0e645ba57ddff9163a3d9b5626f189e974e671bd +Author: Rian McGuire +Date: Tue Apr 24 11:19:21 2018 -0300 + + BUG/MINOR: log: t_idle (%Ti) is not set for some requests + + If TCP content inspection is used, msg_state can be >= HTTP_MSG_ERROR + the first time http_wait_for_request is called. t_idle was being left + unset in that case. + + In the example below : + stick-table type string len 64 size 100k expire 60s + tcp-request inspect-delay 1s + tcp-request content track-sc1 hdr(X-Session) + + %Ti will always be -1, because the msg_state is already at HTTP_MSG_BODY + when http_wait_for_request is called for the first time. + + This patch should backported to 1.8 and 1.7. 
+ + (cherry picked from commit 89fcb7d929283e904cabad58de495d62fc753da2) + Signed-off-by: Christopher Faulet + +diff --git a/src/proto_http.c b/src/proto_http.c +index b38dd84f..4c18a27c 100644 +--- a/src/proto_http.c ++++ b/src/proto_http.c +@@ -1618,18 +1618,16 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit) + /* we're speaking HTTP here, so let's speak HTTP to the client */ + s->srv_error = http_return_srv_error; + ++ /* If there is data available for analysis, log the end of the idle time. */ ++ if (buffer_not_empty(req->buf) && s->logs.t_idle == -1) ++ s->logs.t_idle = tv_ms_elapsed(&s->logs.tv_accept, &now) - s->logs.t_handshake; ++ + /* There's a protected area at the end of the buffer for rewriting + * purposes. We don't want to start to parse the request if the + * protected area is affected, because we may have to move processed + * data later, which is much more complicated. + */ + if (buffer_not_empty(req->buf) && msg->msg_state < HTTP_MSG_ERROR) { +- +- /* This point is executed when some data is avalaible for analysis, +- * so we log the end of the idle time. */ +- if (s->logs.t_idle == -1) +- s->logs.t_idle = tv_ms_elapsed(&s->logs.tv_accept, &now) - s->logs.t_handshake; +- + if (txn->flags & TX_NOT_FIRST) { + if (unlikely(!channel_is_rewritable(req))) { + if (req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) diff --git a/net/haproxy/patches/0003-BUG-MEDIUM-lua-Fix-segmentation-fault-if-a-Lua-task-exits.patch b/net/haproxy/patches/0003-BUG-MEDIUM-lua-Fix-segmentation-fault-if-a-Lua-task-exits.patch new file mode 100644 index 000000000..2a52c7cbd --- /dev/null +++ b/net/haproxy/patches/0003-BUG-MEDIUM-lua-Fix-segmentation-fault-if-a-Lua-task-exits.patch @@ -0,0 +1,48 @@ +commit 17f3e16826e5b1a3f79b7421d69bb85be09a4ad9 +Author: Tim Duesterhus +Date: Tue Apr 24 13:56:01 2018 +0200 + + BUG/MEDIUM: lua: Fix segmentation fault if a Lua task exits + + PiBa-NL reported that haproxy crashes with a segmentation fault + if a function registered using `core.register_task` returns. + + An example Lua script that reproduces the bug is: + + mytask = function() + core.Info("Stopping task") + end + core.register_task(mytask) + + The Valgrind output is as follows: + + ==6759== Process terminating with default action of signal 11 (SIGSEGV) + ==6759== Access not within mapped region at address 0x20 + ==6759== at 0x5B60AA9: lua_sethook (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) + ==6759== by 0x430264: hlua_ctx_resume (hlua.c:1009) + ==6759== by 0x43BB68: hlua_process_task (hlua.c:5525) + ==6759== by 0x4FED0A: process_runnable_tasks (task.c:231) + ==6759== by 0x4B2256: run_poll_loop (haproxy.c:2397) + ==6759== by 0x4B2256: run_thread_poll_loop (haproxy.c:2459) + ==6759== by 0x41A7E4: main (haproxy.c:3049) + + Add the missing `task = NULL` for the `HLUA_E_OK` case. The error cases + have been fixed as of 253e53e661c49fb9723535319cf511152bf09bc7 which + first was included in haproxy v1.8-dev3. This bugfix should be backported + to haproxy 1.8. + + (cherry picked from commit cd235c60425dbe66c9015a357369afacc4880211) + Signed-off-by: Christopher Faulet + +diff --git a/src/hlua.c b/src/hlua.c +index 4e759c7c..d4b7ce91 100644 +--- a/src/hlua.c ++++ b/src/hlua.c +@@ -5528,6 +5528,7 @@ static struct task *hlua_process_task(struct task *task) + hlua_ctx_destroy(hlua); + task_delete(task); + task_free(task); ++ task = NULL; + break; + + case HLUA_E_AGAIN: /* co process or timeout wake me later. 
*/ diff --git a/net/haproxy/patches/0004-MINOR-h2-detect-presence-of-CONNECT-and-or-content-length.patch b/net/haproxy/patches/0004-MINOR-h2-detect-presence-of-CONNECT-and-or-content-length.patch new file mode 100644 index 000000000..4369ff04a --- /dev/null +++ b/net/haproxy/patches/0004-MINOR-h2-detect-presence-of-CONNECT-and-or-content-length.patch @@ -0,0 +1,147 @@ +commit a8bcc7dd3fe5aa615f21e795375ff9225f004498 +Author: Willy Tarreau +Date: Wed Apr 25 18:13:58 2018 +0200 + + MINOR: h2: detect presence of CONNECT and/or content-length + + We'll need this in order to support uploading chunks. The h2 to h1 + converter checks for the presence of the content-length header field + as well as the CONNECT method and returns these information to the + caller. The caller indicates whether or not a body is detected for + the message (presence of END_STREAM or not). No transfer-encoding + header is emitted yet. + + (cherry picked from commit 174b06a572ef141f15d8b7ea64eb6b34ec4c9af1) + Signed-off-by: Christopher Faulet + +diff --git a/include/common/h2.h b/include/common/h2.h +index 65c5ab1c..576ed105 100644 +--- a/include/common/h2.h ++++ b/include/common/h2.h +@@ -145,9 +145,15 @@ enum h2_err { + "\x0d\x0a\x53\x4d\x0d\x0a\x0d\x0a" + + ++/* some flags related to protocol parsing */ ++#define H2_MSGF_BODY 0x0001 // a body is present ++#define H2_MSGF_BODY_CL 0x0002 // content-length is present ++#define H2_MSGF_BODY_TUNNEL 0x0004 // a tunnel is in use (CONNECT) ++ ++ + /* various protocol processing functions */ + +-int h2_make_h1_request(struct http_hdr *list, char *out, int osize); ++int h2_make_h1_request(struct http_hdr *list, char *out, int osize, unsigned int *msgf); + + /* + * Some helpful debugging functions. +diff --git a/src/h2.c b/src/h2.c +index 43ed7f3c..7d9ddd50 100644 +--- a/src/h2.c ++++ b/src/h2.c +@@ -36,9 +36,10 @@ + * stored in . indicates what was found so far. This should be + * called once at the detection of the first general header field or at the end + * of the request if no general header field was found yet. Returns 0 on success +- * or a negative error code on failure. ++ * or a negative error code on failure. Upon success, is updated with a ++ * few H2_MSGF_* flags indicating what was found while parsing. + */ +-static int h2_prepare_h1_reqline(uint32_t fields, struct ist *phdr, char **ptr, char *end) ++static int h2_prepare_h1_reqline(uint32_t fields, struct ist *phdr, char **ptr, char *end, unsigned int *msgf) + { + char *out = *ptr; + int uri_idx = H2_PHDR_IDX_PATH; +@@ -62,6 +63,7 @@ static int h2_prepare_h1_reqline(uint32_t fields, struct ist *phdr, char **ptr, + } + // otherwise OK ; let's use the authority instead of the URI + uri_idx = H2_PHDR_IDX_AUTH; ++ *msgf |= H2_MSGF_BODY_TUNNEL; + } + else if ((fields & (H2_PHDR_FND_METH|H2_PHDR_FND_SCHM|H2_PHDR_FND_PATH)) != + (H2_PHDR_FND_METH|H2_PHDR_FND_SCHM|H2_PHDR_FND_PATH)) { +@@ -113,6 +115,10 @@ static int h2_prepare_h1_reqline(uint32_t fields, struct ist *phdr, char **ptr, + * for a max of bytes, and the amount of bytes emitted is returned. In + * case of error, a negative error code is returned. + * ++ * Upon success, is filled with a few H2_MSGF_* flags indicating what ++ * was found while parsing. The caller must set it to zero in or H2_MSGF_BODY ++ * if a body is detected (!ES). 
++ * + * The headers list must be composed of : + * - n.name != NULL, n.len > 0 : literal header name + * - n.name == NULL, n.len > 0 : indexed pseudo header name number +@@ -124,7 +130,7 @@ static int h2_prepare_h1_reqline(uint32_t fields, struct ist *phdr, char **ptr, + * The Cookie header will be reassembled at the end, and for this, the + * will be used to create a linked list, so its contents may be destroyed. + */ +-int h2_make_h1_request(struct http_hdr *list, char *out, int osize) ++int h2_make_h1_request(struct http_hdr *list, char *out, int osize, unsigned int *msgf) + { + struct ist phdr_val[H2_PHDR_NUM_ENTRIES]; + char *out_end = out + osize; +@@ -176,7 +182,7 @@ int h2_make_h1_request(struct http_hdr *list, char *out, int osize) + /* regular header field in (name,value) */ + if (!(fields & H2_PHDR_FND_NONE)) { + /* no more pseudo-headers, time to build the request line */ +- ret = h2_prepare_h1_reqline(fields, phdr_val, &out, out_end); ++ ret = h2_prepare_h1_reqline(fields, phdr_val, &out, out_end, msgf); + if (ret != 0) + goto leave; + fields |= H2_PHDR_FND_NONE; +@@ -185,6 +191,10 @@ int h2_make_h1_request(struct http_hdr *list, char *out, int osize) + if (isteq(list[idx].n, ist("host"))) + fields |= H2_PHDR_FND_HOST; + ++ if ((*msgf & (H2_MSGF_BODY|H2_MSGF_BODY_TUNNEL|H2_MSGF_BODY_CL)) == H2_MSGF_BODY && ++ isteq(list[idx].n, ist("content-length"))) ++ *msgf |= H2_MSGF_BODY_CL; ++ + /* these ones are forbidden in requests (RFC7540#8.1.2.2) */ + if (isteq(list[idx].n, ist("connection")) || + isteq(list[idx].n, ist("proxy-connection")) || +@@ -232,7 +242,7 @@ int h2_make_h1_request(struct http_hdr *list, char *out, int osize) + + /* Let's dump the request now if not yet emitted. */ + if (!(fields & H2_PHDR_FND_NONE)) { +- ret = h2_prepare_h1_reqline(fields, phdr_val, &out, out_end); ++ ret = h2_prepare_h1_reqline(fields, phdr_val, &out, out_end, msgf); + if (ret != 0) + goto leave; + } +diff --git a/src/mux_h2.c b/src/mux_h2.c +index 4fde7fcc..82dd414a 100644 +--- a/src/mux_h2.c ++++ b/src/mux_h2.c +@@ -2626,6 +2626,7 @@ static int h2_frt_decode_headers(struct h2s *h2s, struct buffer *buf, int count) + struct chunk *tmp = get_trash_chunk(); + struct http_hdr list[MAX_HTTP_HDR * 2]; + struct chunk *copy = NULL; ++ unsigned int msgf; + int flen = h2c->dfl; + int outlen = 0; + int wrap; +@@ -2727,13 +2728,22 @@ static int h2_frt_decode_headers(struct h2s *h2s, struct buffer *buf, int count) + } + + /* OK now we have our header list in */ +- outlen = h2_make_h1_request(list, bi_end(buf), try); ++ msgf = (h2c->dff & H2_F_DATA_END_STREAM) ? 
0 : H2_MSGF_BODY; ++ outlen = h2_make_h1_request(list, bi_end(buf), try, &msgf); + + if (outlen < 0) { + h2c_error(h2c, H2_ERR_COMPRESSION_ERROR); + goto fail; + } + ++ if (msgf & H2_MSGF_BODY) { ++ /* a payload is present */ ++ if (msgf & H2_MSGF_BODY_CL) ++ h2s->flags |= H2_SF_DATA_CLEN; ++ else if (!(msgf & H2_MSGF_BODY_TUNNEL)) ++ h2s->flags |= H2_SF_DATA_CHNK; ++ } ++ + /* now consume the input data */ + bi_del(h2c->dbuf, h2c->dfl); + h2c->st0 = H2_CS_FRAME_H; diff --git a/net/haproxy/patches/0005-BUG-MEDIUM-h2-implement-missing-support-for-chunked-encoded-uploads.patch b/net/haproxy/patches/0005-BUG-MEDIUM-h2-implement-missing-support-for-chunked-encoded-uploads.patch new file mode 100644 index 000000000..f2dc3e6f3 --- /dev/null +++ b/net/haproxy/patches/0005-BUG-MEDIUM-h2-implement-missing-support-for-chunked-encoded-uploads.patch @@ -0,0 +1,164 @@ +commit 05657bd24ebaf20e5c508a435be9a0830591f033 +Author: Willy Tarreau +Date: Wed Apr 25 20:44:22 2018 +0200 + + BUG/MEDIUM: h2: implement missing support for chunked encoded uploads + + Upload requests not carrying a content-length nor tunnelling data must + be sent chunked-encoded over HTTP/1. The code was planned but for some + reason forgotten during the implementation, leading to such payloads to + be sent as tunnelled data. + + Browsers always emit a content length in uploads so this problem doesn't + happen for most sites. However some applications may send data frames + after a request without indicating it earlier. + + The only way to detect that a client will need to send data is that the + HEADERS frame doesn't hold the ES bit. In this case it's wise to look + for the content-length header. If it's not there, either we're in tunnel + (CONNECT method) or chunked-encoding (other methods). + + This patch implements this. + + The following request is sent using content-length : + + curl --http2 -sk https://127.0.0.1:4443/s2 -XPOST -T /large/file + + and these ones using chunked-encoding : + + curl --http2 -sk https://127.0.0.1:4443/s2 -XPUT -T /large/file + curl --http2 -sk https://127.0.0.1:4443/s2 -XPUT -T - < /dev/urandom + + Thanks to Robert Samuel Newson for raising this issue with details. + This fix must be backported to 1.8. + + (cherry picked from commit eba10f24b7da27cde60d2db24aeb1147e1657579) + Signed-off-by: Christopher Faulet + +diff --git a/src/h2.c b/src/h2.c +index 7d9ddd50..5c83d6b6 100644 +--- a/src/h2.c ++++ b/src/h2.c +@@ -262,6 +262,14 @@ int h2_make_h1_request(struct http_hdr *list, char *out, int osize, unsigned int + *(out++) = '\n'; + } + ++ if ((*msgf & (H2_MSGF_BODY|H2_MSGF_BODY_TUNNEL|H2_MSGF_BODY_CL)) == H2_MSGF_BODY) { ++ /* add chunked encoding */ ++ if (out + 28 > out_end) ++ goto fail; ++ memcpy(out, "transfer-encoding: chunked\r\n", 28); ++ out += 28; ++ } ++ + /* now we may have to build a cookie list. We'll dump the values of all + * visited headers. 
+ */ +diff --git a/src/mux_h2.c b/src/mux_h2.c +index 82dd414a..5f1da0df 100644 +--- a/src/mux_h2.c ++++ b/src/mux_h2.c +@@ -2785,6 +2785,7 @@ static int h2_frt_transfer_data(struct h2s *h2s, struct buffer *buf, int count) + struct h2c *h2c = h2s->h2c; + int block1, block2; + unsigned int flen = h2c->dfl; ++ unsigned int chklen = 0; + + h2s->cs->flags &= ~CS_FL_RCV_MORE; + h2c->flags &= ~H2_CF_DEM_SFULL; +@@ -2820,14 +2821,35 @@ static int h2_frt_transfer_data(struct h2s *h2s, struct buffer *buf, int count) + return 0; + } + ++ /* chunked-encoding requires more room */ ++ if (h2s->flags & H2_SF_DATA_CHNK) { ++ chklen = MIN(flen, count); ++ chklen = (chklen < 16) ? 1 : (chklen < 256) ? 2 : ++ (chklen < 4096) ? 3 : (chklen < 65536) ? 4 : ++ (chklen < 1048576) ? 4 : 8; ++ chklen += 4; // CRLF, CRLF ++ } ++ + /* does it fit in output buffer or should we wait ? */ +- if (flen > count) { +- flen = count; +- if (!flen) { +- h2c->flags |= H2_CF_DEM_SFULL; +- h2s->cs->flags |= CS_FL_RCV_MORE; +- return 0; +- } ++ if (flen + chklen > count) { ++ if (chklen >= count) ++ goto full; ++ flen = count - chklen; ++ } ++ ++ if (h2s->flags & H2_SF_DATA_CHNK) { ++ /* emit the chunk size */ ++ unsigned int chksz = flen; ++ char str[10]; ++ char *beg; ++ ++ beg = str + sizeof(str); ++ *--beg = '\n'; ++ *--beg = '\r'; ++ do { ++ *--beg = hextab[chksz & 0xF]; ++ } while (chksz >>= 4); ++ bi_putblk(buf, beg, str + sizeof(str) - beg); + } + + /* Block1 is the length of the first block before the buffer wraps, +@@ -2844,6 +2866,11 @@ static int h2_frt_transfer_data(struct h2s *h2s, struct buffer *buf, int count) + if (block2) + bi_putblk(buf, b_ptr(h2c->dbuf, block1), block2); + ++ if (h2s->flags & H2_SF_DATA_CHNK) { ++ /* emit the CRLF */ ++ bi_putblk(buf, "\r\n", 2); ++ } ++ + /* now mark the input data as consumed (will be deleted from the buffer + * by the caller when seeing FRAME_A after sending the window update). + */ +@@ -2854,15 +2881,22 @@ static int h2_frt_transfer_data(struct h2s *h2s, struct buffer *buf, int count) + + if (h2c->dfl > h2c->dpl) { + /* more data available, transfer stalled on stream full */ +- h2c->flags |= H2_CF_DEM_SFULL; +- h2s->cs->flags |= CS_FL_RCV_MORE; +- return flen; ++ goto more; + } + + end_transfer: + /* here we're done with the frame, all the payload (except padding) was + * transferred. 
+ */ ++ ++ if (h2c->dff & H2_F_DATA_END_STREAM && h2s->flags & H2_SF_DATA_CHNK) { ++ /* emit the trailing 0 CRLF CRLF */ ++ if (count < 5) ++ goto more; ++ chklen += 5; ++ bi_putblk(buf, "0\r\n\r\n", 5); ++ } ++ + h2c->rcvd_c += h2c->dpl; + h2c->rcvd_s += h2c->dpl; + h2c->dpl = 0; +@@ -2877,7 +2911,13 @@ static int h2_frt_transfer_data(struct h2s *h2s, struct buffer *buf, int count) + h2s->flags |= H2_SF_ES_RCVD; + } + +- return flen; ++ return flen + chklen; ++ full: ++ flen = chklen = 0; ++ more: ++ h2c->flags |= H2_CF_DEM_SFULL; ++ h2s->cs->flags |= CS_FL_RCV_MORE; ++ return flen + chklen; + } + + /* diff --git a/net/haproxy/patches/0006-BUG-MINOR-lua-threads-Make-luas-tasks-sticky-to-the-current-thread.patch b/net/haproxy/patches/0006-BUG-MINOR-lua-threads-Make-luas-tasks-sticky-to-the-current-thread.patch new file mode 100644 index 000000000..f6f9d55e5 --- /dev/null +++ b/net/haproxy/patches/0006-BUG-MINOR-lua-threads-Make-luas-tasks-sticky-to-the-current-thread.patch @@ -0,0 +1,36 @@ +commit 8b8d55be7e94ee3d758d41a21fa86a036e91a264 +Author: Christopher Faulet +Date: Wed Apr 25 10:34:45 2018 +0200 + + BUG/MINOR: lua/threads: Make lua's tasks sticky to the current thread + + PiBa-NL reported a bug with tasks registered in lua when HAProxy is started with + serveral threads. These tasks have not specific affinity with threads so they + can be woken up on any threads. So, it is impossbile for these tasks to handled + cosockets or applets, because cosockets and applets are sticky on the thread + which created them. It is forbbiden to manipulate a cosocket from another + thread. + + So to fix the bug, tasks registered in lua are now sticky to the current + thread. Because these tasks can be registered before threads creation, the + affinity is set the first time a lua's task is processed. + + This patch must be backported in HAProxy 1.8. + + (cherry picked from commit 5bc9972ed836517924eea91954d255d317a53418) + Signed-off-by: Christopher Faulet + +diff --git a/src/hlua.c b/src/hlua.c +index d4b7ce91..bd0b87e3 100644 +--- a/src/hlua.c ++++ b/src/hlua.c +@@ -5513,6 +5513,9 @@ static struct task *hlua_process_task(struct task *task) + struct hlua *hlua = task->context; + enum hlua_exec status; + ++ if (task->thread_mask == MAX_THREADS_MASK) ++ task_set_affinity(task, tid_bit); ++ + /* If it is the first call to the task, we must initialize the + * execution timeouts. 
+ */ From 842901b4b11fd1421aba2e77aae24ebdceed5ad7 Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Thu, 10 May 2018 20:37:15 +0200 Subject: [PATCH 10/13] haproxy: Update all patches for HAProxy v1.8.8 - Add new patches (see https://www.haproxy.org/bugs/bugs-1.8.8.html) - Raise patch-level to 03 Signed-off-by: Christian Lachner --- net/haproxy/Makefile | 2 +- ...ig-disable-http-reuse-on-TCP-proxies.patch | 31 +++ ...lth-computation-for-flapping-servers.patch | 42 +++ ...-sync-point-for-more-than-32-threads.patch | 48 ++++ ...tasks-to-sleep-when-waiting-for-data.patch | 31 +++ ...documentation-re-servers-array-table.patch | 252 ++++++++++++++++++ ...nce-to-the-last-ref_elt-being-dumped.patch | 31 +++ ...-free-a-task-that-is-about-to-be-run.patch | 128 +++++++++ ...chedule-socket-task-upon-lua-connect.patch | 48 ++++ 9 files changed, 612 insertions(+), 1 deletion(-) create mode 100644 net/haproxy/patches/0007-BUG-MINOR-config-disable-http-reuse-on-TCP-proxies.patch create mode 100644 net/haproxy/patches/0008-BUG-MINOR-checks-Fix-check--health-computation-for-flapping-servers.patch create mode 100644 net/haproxy/patches/0009-BUG-MEDIUM-threads-Fix-the-sync-point-for-more-than-32-threads.patch create mode 100644 net/haproxy/patches/0010-BUG-MINOR-lua-Put-tasks-to-sleep-when-waiting-for-data.patch create mode 100644 net/haproxy/patches/0011-DOC-MINOR-clean-up-LUA-documentation-re-servers-array-table.patch create mode 100644 net/haproxy/patches/0012-BUG-MINOR-map-correctly-track-reference-to-the-last-ref_elt-being-dumped.patch create mode 100644 net/haproxy/patches/0013-BUG-MEDIUM-task-Dont-free-a-task-that-is-about-to-be-run.patch create mode 100644 net/haproxy/patches/0014-BUG-MINOR-lua-schedule-socket-task-upon-lua-connect.patch diff --git a/net/haproxy/Makefile b/net/haproxy/Makefile index 44544c97f..61ab5a545 100644 --- a/net/haproxy/Makefile +++ b/net/haproxy/Makefile @@ -10,7 +10,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=haproxy PKG_VERSION:=1.8.8 -PKG_RELEASE:=02 +PKG_RELEASE:=03 PKG_SOURCE:=haproxy-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://www.haproxy.org/download/1.8/src/ diff --git a/net/haproxy/patches/0007-BUG-MINOR-config-disable-http-reuse-on-TCP-proxies.patch b/net/haproxy/patches/0007-BUG-MINOR-config-disable-http-reuse-on-TCP-proxies.patch new file mode 100644 index 000000000..fdca4ea07 --- /dev/null +++ b/net/haproxy/patches/0007-BUG-MINOR-config-disable-http-reuse-on-TCP-proxies.patch @@ -0,0 +1,31 @@ +commit 80e179128cfd78d95cdebf7195fd21299e7931b6 +Author: Willy Tarreau +Date: Sat Apr 28 07:18:15 2018 +0200 + + BUG/MINOR: config: disable http-reuse on TCP proxies + + Louis Chanouha reported an inappropriate warning when http-reuse is + present in a defaults section while a TCP proxy accidently inherits + it and finds a conflict with other options like the use of the PROXY + protocol. To fix this patch removes the http-reuse option for TCP + proxies. + + This fix needs to be backported to 1.8, 1.7 and possibly 1.6. 
+ + (cherry picked from commit 46deab6e64bfda7211b7c3199ad01f136141c86f) + Signed-off-by: Christopher Faulet + +diff --git a/src/cfgparse.c b/src/cfgparse.c +index 5a460381..63d2de58 100644 +--- a/src/cfgparse.c ++++ b/src/cfgparse.c +@@ -8702,6 +8702,9 @@ out_uri_auth_compat: + } + #endif + ++ if ((curproxy->mode != PR_MODE_HTTP) && (curproxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) ++ curproxy->options &= ~PR_O_REUSE_MASK; ++ + if ((curproxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) { + if ((curproxy->conn_src.opts & CO_SRC_TPROXY_MASK) == CO_SRC_TPROXY_CLI || + (curproxy->conn_src.opts & CO_SRC_TPROXY_MASK) == CO_SRC_TPROXY_CIP || diff --git a/net/haproxy/patches/0008-BUG-MINOR-checks-Fix-check--health-computation-for-flapping-servers.patch b/net/haproxy/patches/0008-BUG-MINOR-checks-Fix-check--health-computation-for-flapping-servers.patch new file mode 100644 index 000000000..d2dd8d899 --- /dev/null +++ b/net/haproxy/patches/0008-BUG-MINOR-checks-Fix-check--health-computation-for-flapping-servers.patch @@ -0,0 +1,42 @@ +commit edb5a1efd22eb9918574d962640cd2ae3bb45ad3 +Author: Christopher Faulet +Date: Wed May 2 12:12:45 2018 +0200 + + BUG/MINOR: checks: Fix check->health computation for flapping servers + + This patch fixes an old bug introduced in the commit 7b1d47ce ("MAJOR: checks: + move health checks changes to set_server_check_status()"). When a DOWN server is + flapping, everytime a check succeds, check->health is incremented. But when a + check fails, it is decremented only when it is higher than the rise value. So if + only one check succeds for a DOWN server, check->health will remain set to 1 for + all subsequent failing checks. + + So, at first glance, it seems not that terrible because the server remains + DOWN. But it is reported in the transitional state "DOWN server, going up". And + it will remain in this state until it is UP again. And there is also an + insidious side effect. If a DOWN server is flapping time to time, It will end to + be considered UP after a uniq successful check, , regardless the rise threshold, + because check->health will be increased slowly and never decreased. + + To fix the bug, we just need to reset check->health to 0 when a check fails for + a DOWN server. To do so, we just need to relax the condition to handle a failure + in the function set_server_check_status. + + This patch must be backported to haproxy 1.5 and newer. 
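To make the failure mode concrete, here is a throwaway C model of just the counter logic described above; it is not HAProxy code and the rise value of 3 is only an example:

    #include <stdio.h>

    int main(void)
    {
        int health = 0, rise = 3;        /* server currently DOWN */

        health++;                        /* one lone successful check */
        for (int i = 0; i < 1000; i++)   /* followed by any number of failures */
            if (health >= rise)          /* pre-fix condition: 1 >= 3 is false */
                health--;                /* so the counter is never brought back down */

        printf("health = %d\n", health); /* prints 1: "DOWN server, going up" forever */
        return 0;
    }

With the relaxed condition introduced here (check->health > 0), the very first failing check resets the counter to 0 and the misleading transitional state disappears.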
+ + (cherry picked from commit b119a79fc336f2b6074de1c3113b1682c717985c) + Signed-off-by: Willy Tarreau + +diff --git a/src/checks.c b/src/checks.c +index 80a9c70d..d07a82f8 100644 +--- a/src/checks.c ++++ b/src/checks.c +@@ -243,7 +243,7 @@ static void set_server_check_status(struct check *check, short status, const cha + */ + if ((!(check->state & CHK_ST_AGENT) || + (check->status >= HCHK_STATUS_L57DATA)) && +- (check->health >= check->rise)) { ++ (check->health > 0)) { + HA_ATOMIC_ADD(&s->counters.failed_checks, 1); + report = 1; + check->health--; diff --git a/net/haproxy/patches/0009-BUG-MEDIUM-threads-Fix-the-sync-point-for-more-than-32-threads.patch b/net/haproxy/patches/0009-BUG-MEDIUM-threads-Fix-the-sync-point-for-more-than-32-threads.patch new file mode 100644 index 000000000..7574b6907 --- /dev/null +++ b/net/haproxy/patches/0009-BUG-MEDIUM-threads-Fix-the-sync-point-for-more-than-32-threads.patch @@ -0,0 +1,48 @@ +commit 830324444e57c042666b17ac4584352cca85dafd +Author: Christopher Faulet +Date: Wed May 2 16:58:40 2018 +0200 + + BUG/MEDIUM: threads: Fix the sync point for more than 32 threads + + In the sync point, to know if a thread has requested a synchronization, we call + the function thread_need_sync(). It should return 1 if yes, otherwise it should + return 0. It is intended to return a signed integer. + + But internally, instead of returning 0 or 1, it returns 0 or tid_bit + (threads_want_sync & tid_bit). So, tid_bit is casted in integer. For the first + 32 threads, it's ok, because we always check if thread_need_sync() returns + something else than 0. But this is a problem if HAProxy is started with more + than 32 threads, because for threads 33 to 64 (so for tid 32 to 63), their + tid_bit casted to integer are evaluated to 0. So the sync point does not work for + more than 32 threads. + + Now, the function thread_need_sync() respects its contract, returning 0 or + 1. the function thread_no_sync() has also been updated to avoid any ambiguities. + + This patch must be backported in HAProxy 1.8. + + (cherry picked from commit 148b16e1ceb819dfcef4c45828121d9cd7474b35) + Signed-off-by: Willy Tarreau + +diff --git a/src/hathreads.c b/src/hathreads.c +index daf226ce..944a0d5b 100644 +--- a/src/hathreads.c ++++ b/src/hathreads.c +@@ -85,7 +85,7 @@ void thread_want_sync() + /* Returns 1 if no thread has requested a sync. Otherwise, it returns 0. */ + int thread_no_sync() + { +- return (threads_want_sync == 0); ++ return (threads_want_sync == 0UL); + } + + /* Returns 1 if the current thread has requested a sync. Otherwise, it returns +@@ -93,7 +93,7 @@ int thread_no_sync() + */ + int thread_need_sync() + { +- return (threads_want_sync & tid_bit); ++ return ((threads_want_sync & tid_bit) != 0UL); + } + + /* Thread barrier. Synchronizes all threads at the barrier referenced by diff --git a/net/haproxy/patches/0010-BUG-MINOR-lua-Put-tasks-to-sleep-when-waiting-for-data.patch b/net/haproxy/patches/0010-BUG-MINOR-lua-Put-tasks-to-sleep-when-waiting-for-data.patch new file mode 100644 index 000000000..3b298f963 --- /dev/null +++ b/net/haproxy/patches/0010-BUG-MINOR-lua-Put-tasks-to-sleep-when-waiting-for-data.patch @@ -0,0 +1,31 @@ +commit 335bc7b74eee84f0a3bcb615cadd23fe01d1336c +Author: PiBa-NL +Date: Wed May 2 22:27:14 2018 +0200 + + BUG/MINOR: lua: Put tasks to sleep when waiting for data + + If a lua socket is waiting for data it currently spins at 100% cpu usage. + This because the TICK_ETERNITY returned by the socket is ignored when + setting the 'expire' time of the task. 
+ + Fixed by removing the check for yields that return TICK_ETERNITY. + + This should be backported to at least 1.8. + + (cherry picked from commit fe971b35aeca9994f3823112c783aa796e74075a) + Signed-off-by: Willy Tarreau + +diff --git a/src/hlua.c b/src/hlua.c +index bd0b87e3..0100e7cf 100644 +--- a/src/hlua.c ++++ b/src/hlua.c +@@ -5536,8 +5536,7 @@ static struct task *hlua_process_task(struct task *task) + + case HLUA_E_AGAIN: /* co process or timeout wake me later. */ + notification_gc(&hlua->com); +- if (hlua->wake_time != TICK_ETERNITY) +- task->expire = hlua->wake_time; ++ task->expire = hlua->wake_time; + break; + + /* finished with error. */ diff --git a/net/haproxy/patches/0011-DOC-MINOR-clean-up-LUA-documentation-re-servers-array-table.patch b/net/haproxy/patches/0011-DOC-MINOR-clean-up-LUA-documentation-re-servers-array-table.patch new file mode 100644 index 000000000..7f210e533 --- /dev/null +++ b/net/haproxy/patches/0011-DOC-MINOR-clean-up-LUA-documentation-re-servers-array-table.patch @@ -0,0 +1,252 @@ +commit 016feef5483397491af3242162934d9e9dbc6263 +Author: Patrick Hemmer +Date: Tue May 1 21:30:41 2018 -0400 + + DOC/MINOR: clean up LUA documentation re: servers & array/table. + + * A few typos + * Fix definitions of values which are tables, not arrays. + * Consistent US English naming for "server" instead of "serveur". + + [tfo: should be backported to 1.6 and higher] + + (cherry picked from commit c6a1d711a4d47d68611aa28adecdadba96221bde) + Signed-off-by: Willy Tarreau + +diff --git a/doc/lua-api/index.rst b/doc/lua-api/index.rst +index e7aa425d..2d210945 100644 +--- a/doc/lua-api/index.rst ++++ b/doc/lua-api/index.rst +@@ -169,9 +169,9 @@ Core class + + **context**: task, action, sample-fetch, converter + +- This attribute is an array of declared proxies (frontend and backends). Each +- proxy give an access to his list of listeners and servers. Each entry is of +- type :ref:`proxy_class` ++ This attribute is a table of declared proxies (frontend and backends). Each ++ proxy give an access to his list of listeners and servers. The table is ++ indexed by proxy name, and each entry is of type :ref:`proxy_class`. + + Warning, if you are declared frontend and backend with the same name, only one + of these are listed. +@@ -183,12 +183,9 @@ Core class + + **context**: task, action, sample-fetch, converter + +- This attribute is an array of declared proxies with backend capability. Each +- proxy give an access to his list of listeners and servers. Each entry is of +- type :ref:`proxy_class` +- +- Warning, if you are declared frontend and backend with the same name, only one +- of these are listed. ++ This attribute is a table of declared proxies with backend capability. Each ++ proxy give an access to his list of listeners and servers. The table is ++ indexed by the backend name, and each entry is of type :ref:`proxy_class`. + + :see: :js:attr:`core.proxies` + :see: :js:attr:`core.frontends` +@@ -197,12 +194,9 @@ Core class + + **context**: task, action, sample-fetch, converter + +- This attribute is an array of declared proxies with frontend capability. Each +- proxy give an access to his list of listeners and servers. Each entry is of +- type :ref:`proxy_class` +- +- Warning, if you are declared frontend and backend with the same name, only one +- of these are listed. ++ This attribute is a table of declared proxies with frontend capability. Each ++ proxy give an access to his list of listeners and servers. 
The table is ++ indexed by the frontend name, and each entry is of type :ref:`proxy_class`. + + :see: :js:attr:`core.proxies` + :see: :js:attr:`core.backends` +@@ -336,7 +330,7 @@ Core class + Lua execution or resume, so two consecutive call to the function "now" will + probably returns the same result. + +- :returns: an array which contains two entries "sec" and "usec". "sec" ++ :returns: a table which contains two entries "sec" and "usec". "sec" + contains the current at the epoch format, and "usec" contains the + current microseconds. + +@@ -439,9 +433,12 @@ Core class + + **context**: body, init, task, action, sample-fetch, converter + +- proxies is an array containing the list of all proxies declared in the +- configuration file. Each entry of the proxies array is an object of type +- :ref:`proxy_class` ++ proxies is a table containing the list of all proxies declared in the ++ configuration file. The table is indexed by the proxy name, and each entry ++ of the proxies table is an object of type :ref:`proxy_class`. ++ ++ Warning, if you have declared a frontend and backend with the same name, only ++ one of these are listed. + + .. js:function:: core.register_action(name, actions, func [, nb_args]) + +@@ -852,13 +849,14 @@ Proxy class + + .. js:attribute:: Proxy.servers + +- Contain an array with the attached servers. Each server entry is an object of +- type :ref:`server_class`. ++ Contain a table with the attached servers. The table is indexed by server ++ name, and each server entry is an object of type :ref:`server_class`. + + .. js:attribute:: Proxy.listeners + +- Contain an array with the attached listeners. Each listeners entry is an +- object of type :ref:`listener_class`. ++ Contain a table with the attached listeners. The table is indexed by listener ++ name, and each each listeners entry is an object of type ++ :ref:`listener_class`. + + .. js:function:: Proxy.pause(px) + +@@ -908,21 +906,25 @@ Proxy class + + .. js:function:: Proxy.get_stats(px) + +- Returns an array containg the proxy statistics. The statistics returned are ++ Returns a table containg the proxy statistics. The statistics returned are + not the same if the proxy is frontend or a backend. + + :param class_proxy px: A :ref:`proxy_class` which indicates the manipulated + proxy. +- :returns: a key/value array containing stats ++ :returns: a key/value table containing stats + + .. _server_class: + + Server class + ============ + ++.. js:class:: Server ++ ++ This class provides a way for manipulating servers and retrieving information. ++ + .. js:function:: Server.is_draining(sv) + +- Return true if the server is currently draining stiky connections. ++ Return true if the server is currently draining sticky connections. + + :param class_server sv: A :ref:`server_class` which indicates the manipulated + server. +@@ -930,7 +932,7 @@ Server class + + .. js:function:: Server.set_weight(sv, weight) + +- Dynamically change the weight of the serveur. See the management socket ++ Dynamically change the weight of the server. See the management socket + documentation for more information about the format of the string. + + :param class_server sv: A :ref:`server_class` which indicates the manipulated +@@ -939,7 +941,7 @@ Server class + + .. js:function:: Server.get_weight(sv) + +- This function returns an integer representing the serveur weight. ++ This function returns an integer representing the server weight. + + :param class_server sv: A :ref:`server_class` which indicates the manipulated + server. 
+@@ -947,16 +949,16 @@ Server class + + .. js:function:: Server.set_addr(sv, addr) + +- Dynamically change the address of the serveur. See the management socket ++ Dynamically change the address of the server. See the management socket + documentation for more information about the format of the string. + + :param class_server sv: A :ref:`server_class` which indicates the manipulated + server. +- :param string weight: A string describing the server address. ++ :param string addr: A string describing the server address. + + .. js:function:: Server.get_addr(sv) + +- Returns a string describing the address of the serveur. ++ Returns a string describing the address of the server. + + :param class_server sv: A :ref:`server_class` which indicates the manipulated + server. +@@ -968,7 +970,7 @@ Server class + + :param class_server sv: A :ref:`server_class` which indicates the manipulated + server. +- :returns: a key/value array containing stats ++ :returns: a key/value table containing stats + + .. js:function:: Server.shut_sess(sv) + +@@ -1085,7 +1087,7 @@ Listener class + + :param class_listener ls: A :ref:`listener_class` which indicates the + manipulated listener. +- :returns: a key/value array containing stats ++ :returns: a key/value table containing stats + + .. _concat_class: + +@@ -1169,7 +1171,7 @@ Fetches class + usage. they are the chapters 7.3.2 to 7.3.6. + + **warning** some sample fetches are not available in some context. These +- limitations are specified in this documentation when theire useful. ++ limitations are specified in this documentation when they're useful. + + :see: :js:attr:`TXN.f` + :see: :js:attr:`TXN.sf` +@@ -1345,13 +1347,13 @@ HTTP class + + .. js:function:: HTTP.req_get_headers(http) + +- Returns an array containing all the request headers. ++ Returns a table containing all the request headers. + + :param class_http http: The related http object. +- :returns: array of headers. ++ :returns: table of headers. + :see: :js:func:`HTTP.res_get_headers` + +- This is the form of the returned array: ++ This is the form of the returned table: + + .. code-block:: lua + +@@ -1366,13 +1368,13 @@ HTTP class + + .. js:function:: HTTP.res_get_headers(http) + +- Returns an array containing all the response headers. ++ Returns a table containing all the response headers. + + :param class_http http: The related http object. +- :returns: array of headers. ++ :returns: table of headers. + :see: :js:func:`HTTP.req_get_headers` + +- This is the form of the returned array: ++ This is the form of the returned table: + + .. code-block:: lua + +@@ -2210,12 +2212,12 @@ AppletHTTP class + + .. js:attribute:: AppletHTTP.headers + +- :returns: array ++ :returns: table + +- The attribute headers returns an array containing the HTTP ++ The attribute headers returns a table containing the HTTP + headers. The header names are always in lower case. As the header name can be + encountered more than once in each request, the value is indexed with 0 as +- first index value. The array have this form: ++ first index value. The table have this form: + + .. 
code-block:: lua + diff --git a/net/haproxy/patches/0012-BUG-MINOR-map-correctly-track-reference-to-the-last-ref_elt-being-dumped.patch b/net/haproxy/patches/0012-BUG-MINOR-map-correctly-track-reference-to-the-last-ref_elt-being-dumped.patch new file mode 100644 index 000000000..af58e5f44 --- /dev/null +++ b/net/haproxy/patches/0012-BUG-MINOR-map-correctly-track-reference-to-the-last-ref_elt-being-dumped.patch @@ -0,0 +1,31 @@ +commit b2219ae216a141acdf0e2a3f67d2c85aee2a2bc2 +Author: Dragan Dosen +Date: Fri May 4 16:27:15 2018 +0200 + + BUG/MINOR: map: correctly track reference to the last ref_elt being dumped + + The bug was introduced in the commit 8d85aa4 ("BUG/MAJOR: map: fix + segfault during 'show map/acl' on cli"). + + This patch should be backported to 1.8, 1.7 and 1.6. + + (cherry picked from commit 336a11f75571ad46f74a7c6247c13ed44f95da93) + Signed-off-by: Willy Tarreau + +diff --git a/src/map.c b/src/map.c +index f40e4394..a9a1e53c 100644 +--- a/src/map.c ++++ b/src/map.c +@@ -307,9 +307,9 @@ static int cli_io_handler_pat_list(struct appctx *appctx) + * reference to the last ref_elt being dumped. + */ + if (appctx->st2 == STAT_ST_LIST) { +- if (!LIST_ISEMPTY(&appctx->ctx.sess.bref.users)) { +- LIST_DEL(&appctx->ctx.sess.bref.users); +- LIST_INIT(&appctx->ctx.sess.bref.users); ++ if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users)) { ++ LIST_DEL(&appctx->ctx.map.bref.users); ++ LIST_INIT(&appctx->ctx.map.bref.users); + } + } + return 1; diff --git a/net/haproxy/patches/0013-BUG-MEDIUM-task-Dont-free-a-task-that-is-about-to-be-run.patch b/net/haproxy/patches/0013-BUG-MEDIUM-task-Dont-free-a-task-that-is-about-to-be-run.patch new file mode 100644 index 000000000..e802c7f6b --- /dev/null +++ b/net/haproxy/patches/0013-BUG-MEDIUM-task-Dont-free-a-task-that-is-about-to-be-run.patch @@ -0,0 +1,128 @@ +commit a0f0db361978154474d76028183647d5991f3b5c +Author: Olivier Houchard +Date: Fri May 4 15:46:16 2018 +0200 + + BUG/MEDIUM: task: Don't free a task that is about to be run. + + While running a task, we may try to delete and free a task that is about to + be run, because it's part of the local tasks list, or because rq_next points + to it. + So flag any task that is in the local tasks list to be deleted, instead of + run, by setting t->process to NULL, and re-make rq_next a global, + thread-local variable, that is modified if we attempt to delete that task. + + Many thanks to PiBa-NL for reporting this and analysing the problem. + + This should be backported to 1.8. 
+ + (cherry picked from commit 9b36cb4a414c22e13d344afbbe70684e9f2f1d49) + Signed-off-by: Willy Tarreau + +diff --git a/include/proto/task.h b/include/proto/task.h +index cbc1a907..c1c4c07e 100644 +--- a/include/proto/task.h ++++ b/include/proto/task.h +@@ -90,6 +90,8 @@ extern unsigned int nb_tasks_cur; + extern unsigned int niced_tasks; /* number of niced tasks in the run queue */ + extern struct pool_head *pool_head_task; + extern struct pool_head *pool_head_notification; ++extern THREAD_LOCAL struct task *curr_task; /* task currently running or NULL */ ++extern THREAD_LOCAL struct eb32sc_node *rq_next; /* Next task to be potentially run */ + + __decl_hathreads(extern HA_SPINLOCK_T rq_lock); /* spin lock related to run queue */ + __decl_hathreads(extern HA_SPINLOCK_T wq_lock); /* spin lock related to wait queue */ +@@ -177,8 +179,11 @@ static inline struct task *__task_unlink_rq(struct task *t) + static inline struct task *task_unlink_rq(struct task *t) + { + HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock); +- if (likely(task_in_rq(t))) ++ if (likely(task_in_rq(t))) { ++ if (&t->rq == rq_next) ++ rq_next = eb32sc_next(rq_next, tid_bit); + __task_unlink_rq(t); ++ } + HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock); + return t; + } +@@ -230,7 +235,7 @@ static inline struct task *task_new(unsigned long thread_mask) + * Free a task. Its context must have been freed since it will be lost. + * The task count is decremented. + */ +-static inline void task_free(struct task *t) ++static inline void __task_free(struct task *t) + { + pool_free(pool_head_task, t); + if (unlikely(stopping)) +@@ -238,6 +243,18 @@ static inline void task_free(struct task *t) + HA_ATOMIC_SUB(&nb_tasks, 1); + } + ++static inline void task_free(struct task *t) ++{ ++ /* There's no need to protect t->state with a lock, as the task ++ * has to run on the current thread. ++ */ ++ if (t == curr_task || !(t->state & TASK_RUNNING)) ++ __task_free(t); ++ else ++ t->process = NULL; ++} ++ ++ + /* Place into the wait queue, where it may already be. If the expiration + * timer is infinite, do nothing and rely on wake_expired_task to clean up. 
+ */ +diff --git a/src/task.c b/src/task.c +index fd9acf66..3d021bb4 100644 +--- a/src/task.c ++++ b/src/task.c +@@ -39,6 +39,7 @@ unsigned int nb_tasks_cur = 0; /* copy of the tasks count */ + unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */ + + THREAD_LOCAL struct task *curr_task = NULL; /* task currently running or NULL */ ++THREAD_LOCAL struct eb32sc_node *rq_next = NULL; /* Next task to be potentially run */ + + __decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) rq_lock); /* spin lock related to run queue */ + __decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) wq_lock); /* spin lock related to wait queue */ +@@ -186,7 +187,6 @@ void process_runnable_tasks() + struct task *t; + int i; + int max_processed; +- struct eb32sc_node *rq_next; + struct task *local_tasks[16]; + int local_tasks_count; + int final_tasks_count; +@@ -227,8 +227,14 @@ void process_runnable_tasks() + */ + if (likely(t->process == process_stream)) + t = process_stream(t); +- else +- t = t->process(t); ++ else { ++ if (t->process != NULL) ++ t = t->process(t); ++ else { ++ __task_free(t); ++ t = NULL; ++ } ++ } + curr_task = NULL; + + if (likely(t != NULL)) { +@@ -309,8 +315,14 @@ void process_runnable_tasks() + curr_task = t; + if (likely(t->process == process_stream)) + t = process_stream(t); +- else +- t = t->process(t); ++ else { ++ if (t->process != NULL) ++ t = t->process(t); ++ else { ++ __task_free(t); ++ t = NULL; ++ } ++ } + curr_task = NULL; + if (t) + local_tasks[final_tasks_count++] = t; diff --git a/net/haproxy/patches/0014-BUG-MINOR-lua-schedule-socket-task-upon-lua-connect.patch b/net/haproxy/patches/0014-BUG-MINOR-lua-schedule-socket-task-upon-lua-connect.patch new file mode 100644 index 000000000..f7e3cf34d --- /dev/null +++ b/net/haproxy/patches/0014-BUG-MINOR-lua-schedule-socket-task-upon-lua-connect.patch @@ -0,0 +1,48 @@ +commit 52ec3578c3ddc688ae14da3cd3e7e351494603d8 +Author: PiBa-NL +Date: Sat May 5 23:51:42 2018 +0200 + + BUG/MINOR: lua: schedule socket task upon lua connect() + + The parameters like server-address, port and timeout should be set before + process_stream task is called to avoid the stream being 'closed' before it + got initialized properly. This is most clearly visible when running with + tune.lua.forced-yield=1.. So scheduling the task should not be done when + creating the lua socket, but when connect is called. The error + "socket: not yet initialised, you can't set timeouts." would then appear. + + Below code for example also shows this issue, as the sleep will + yield the lua code: + local con = core.tcp() + core.sleep(1) + con:settimeout(10) + + (cherry picked from commit 706d5ee0c366787536213ccd6dea264d20b76a22) + [wt: must be backported to 1.7 and 1.6 as well with a different patch, + see https://www.mail-archive.com/haproxy@formilux.org/msg29924.html] + Signed-off-by: Willy Tarreau + +diff --git a/src/hlua.c b/src/hlua.c +index 0100e7cf..5cc918c9 100644 +--- a/src/hlua.c ++++ b/src/hlua.c +@@ -2415,6 +2415,10 @@ __LJMP static int hlua_socket_connect(struct lua_State *L) + WILL_LJMP(luaL_error(L, "out of memory")); + } + xref_unlock(&socket->xref, peer); ++ ++ task_wakeup(s->task, TASK_WOKEN_INIT); ++ /* Return yield waiting for connection. 
*/ ++ + WILL_LJMP(hlua_yieldk(L, 0, 0, hlua_socket_connect_yield, TICK_ETERNITY, 0)); + + return 0; +@@ -2566,8 +2570,6 @@ __LJMP static int hlua_socket_new(lua_State *L) + strm->flags |= SF_DIRECT | SF_ASSIGNED | SF_ADDR_SET | SF_BE_ASSIGNED; + strm->target = &socket_tcp.obj_type; + +- task_wakeup(strm->task, TASK_WOKEN_INIT); +- /* Return yield waiting for connection. */ + return 1; + + out_fail_stream: From e5a860634b2077b6b9bb00050f83ff20a015ff9a Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Wed, 16 May 2018 14:44:53 +0200 Subject: [PATCH 11/13] haproxy: Update all patches for HAProxy v1.8.8 - Add new patches (see https://www.haproxy.org/bugs/bugs-1.8.8.html) - Raise patch-level to 04 Signed-off-by: Christian Lachner --- net/haproxy/Makefile | 2 +- ...e-large-proxy-IDs-can-be-represented.patch | 38 ++++++++++ ...t-always-abort-transfers-on-CF_SHUTR.patch | 70 +++++++++++++++++++ 3 files changed, 109 insertions(+), 1 deletion(-) create mode 100644 net/haproxy/patches/0015-BUG-MINOR-lua-ensure-large-proxy-IDs-can-be-represented.patch create mode 100644 net/haproxy/patches/0016-BUG-MEDIUM-http-dont-always-abort-transfers-on-CF_SHUTR.patch diff --git a/net/haproxy/Makefile b/net/haproxy/Makefile index 61ab5a545..8c3a714e9 100644 --- a/net/haproxy/Makefile +++ b/net/haproxy/Makefile @@ -10,7 +10,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=haproxy PKG_VERSION:=1.8.8 -PKG_RELEASE:=03 +PKG_RELEASE:=04 PKG_SOURCE:=haproxy-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://www.haproxy.org/download/1.8/src/ diff --git a/net/haproxy/patches/0015-BUG-MINOR-lua-ensure-large-proxy-IDs-can-be-represented.patch b/net/haproxy/patches/0015-BUG-MINOR-lua-ensure-large-proxy-IDs-can-be-represented.patch new file mode 100644 index 000000000..dce13e4fa --- /dev/null +++ b/net/haproxy/patches/0015-BUG-MINOR-lua-ensure-large-proxy-IDs-can-be-represented.patch @@ -0,0 +1,38 @@ +commit edb4427ab7c070a16cb9a23460f68b3fc3c041bb +Author: Willy Tarreau +Date: Sun May 6 14:50:09 2018 +0200 + + BUG/MINOR: lua: ensure large proxy IDs can be represented + + In function hlua_fcn_new_proxy() too small a buffer was passed to + snprintf(), resulting in large proxy or listener IDs to make + snprintf() fail. It is unlikely to meet this case but let's fix it + anyway. + + This fix must be backported to all stable branches where it applies. 
+ + (cherry picked from commit 29d698040d6bb56b29c036aeba05f0d52d8ce94b) + Signed-off-by: Willy Tarreau + +diff --git a/src/hlua_fcn.c b/src/hlua_fcn.c +index a8d53d45..1df08f85 100644 +--- a/src/hlua_fcn.c ++++ b/src/hlua_fcn.c +@@ -796,7 +796,7 @@ int hlua_fcn_new_proxy(lua_State *L, struct proxy *px) + struct server *srv; + struct listener *lst; + int lid; +- char buffer[10]; ++ char buffer[17]; + + lua_newtable(L); + +@@ -836,7 +836,7 @@ int hlua_fcn_new_proxy(lua_State *L, struct proxy *px) + if (lst->name) + lua_pushstring(L, lst->name); + else { +- snprintf(buffer, 10, "sock-%d", lid); ++ snprintf(buffer, sizeof(buffer), "sock-%d", lid); + lid++; + lua_pushstring(L, buffer); + } diff --git a/net/haproxy/patches/0016-BUG-MEDIUM-http-dont-always-abort-transfers-on-CF_SHUTR.patch b/net/haproxy/patches/0016-BUG-MEDIUM-http-dont-always-abort-transfers-on-CF_SHUTR.patch new file mode 100644 index 000000000..0605b205d --- /dev/null +++ b/net/haproxy/patches/0016-BUG-MEDIUM-http-dont-always-abort-transfers-on-CF_SHUTR.patch @@ -0,0 +1,70 @@ +commit 1c10e5b1b95142bb3ac385be1e60d8b180b2e99e +Author: Willy Tarreau +Date: Wed May 16 11:35:05 2018 +0200 + + BUG/MEDIUM: http: don't always abort transfers on CF_SHUTR + + Pawel Karoluk reported on Discourse[1] that HTTP/2 breaks url_param. + + Christopher managed to track it down to the HTTP_MSGF_WAIT_CONN flag + which is set there to ensure the connection is validated before sending + the headers, as we may need to rewind the stream and hash again upon + redispatch. What happens is that in the forwarding code we refrain + from forwarding when this flag is set and the connection is not yet + established, and for this we go through the missing_data_or_waiting + path. This exit path was initially designed only to wait for data + from the client, so it rightfully checks whether or not the client + has already closed since in that case it must not wait for more data. + But it also has the side effect of aborting such a transfer if the + client has closed after the request, which is exactly what happens + in H2. + + A study on the code reveals that this whole combined check should + be revisited : while it used to be true that waiting had the same + error conditions as missing data, it's not true anymore. Some other + corner cases were identified, such as the risk to report a server + close instead of a client timeout when waiting for the client to + read the last chunk of data if the shutr is already present, or + the risk to fail a redispatch when a client uploads some data and + closes before the connection establishes. The compression seems to + be at risk of rare issues there if a write to a full buffer is not + yet possible but a shutr is already queued. + + At the moment these risks are extremely unlikely but they do exist, + and their impact is very minor since it mostly concerns an issue not + being optimally handled, and the fixes risk to cause more serious + issues. Thus this patch only focuses on how the HTTP_MSGF_WAIT_CONN + is handled and leaves the rest untouched. + + This patch needs to be backported to 1.8, and could be backported to + earlier versions to properly take care of HTTP/1 requests passing via + url_param which are closed immediately after the headers, though this + is unlikely as this behaviour is only exhibited by scripts. 
+ + [1] https://discourse.haproxy.org/t/haproxy-1-8-x-url-param-issue-in-http2/2482/13 + + (cherry picked from commit ba20dfc50161ba705a746d54ebc1a0a45c46beab) + Signed-off-by: Willy Tarreau + +diff --git a/src/proto_http.c b/src/proto_http.c +index 4c18a27c..b384cef1 100644 +--- a/src/proto_http.c ++++ b/src/proto_http.c +@@ -4865,7 +4865,8 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit) + if (!(s->res.flags & CF_READ_ATTACHED)) { + channel_auto_connect(req); + req->flags |= CF_WAKE_CONNECT; +- goto missing_data_or_waiting; ++ channel_dont_close(req); /* don't fail on early shutr */ ++ goto waiting; + } + msg->flags &= ~HTTP_MSGF_WAIT_CONN; + } +@@ -4949,6 +4950,7 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit) + goto return_bad_req_stats_ok; + } + ++ waiting: + /* waiting for the last bits to leave the buffer */ + if (req->flags & CF_SHUTW) + goto aborted_xfer; From 567135a239215c471ea15f5dbb38f89f901f073c Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Fri, 18 May 2018 14:01:18 +0200 Subject: [PATCH 12/13] haproxy: Update all patches for HAProxy v1.8.8+5 - Add new patches (see https://www.haproxy.org/bugs/bugs-1.8.8.html) - Raise patch-level to 05 Signed-off-by: Christian Lachner --- net/haproxy/Makefile | 2 +- ...l-list-for-fd-shared-between-threads.patch | 709 ++++++++++++++++++ ...properly-protect-SSL-cert-generation.patch | 45 ++ 3 files changed, 755 insertions(+), 1 deletion(-) create mode 100644 net/haproxy/patches/0017-BUG-MEDIUM-pollers-Use-a-global-list-for-fd-shared-between-threads.patch create mode 100644 net/haproxy/patches/0018-BUG-MEDIUM-ssl-properly-protect-SSL-cert-generation.patch diff --git a/net/haproxy/Makefile b/net/haproxy/Makefile index 8c3a714e9..fb213260c 100644 --- a/net/haproxy/Makefile +++ b/net/haproxy/Makefile @@ -10,7 +10,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=haproxy PKG_VERSION:=1.8.8 -PKG_RELEASE:=04 +PKG_RELEASE:=05 PKG_SOURCE:=haproxy-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://www.haproxy.org/download/1.8/src/ diff --git a/net/haproxy/patches/0017-BUG-MEDIUM-pollers-Use-a-global-list-for-fd-shared-between-threads.patch b/net/haproxy/patches/0017-BUG-MEDIUM-pollers-Use-a-global-list-for-fd-shared-between-threads.patch new file mode 100644 index 000000000..b3ae30e51 --- /dev/null +++ b/net/haproxy/patches/0017-BUG-MEDIUM-pollers-Use-a-global-list-for-fd-shared-between-threads.patch @@ -0,0 +1,709 @@ +commit 954db1d01a3d706d4cacd288f28e8517a635d36e +Author: Olivier Houchard +Date: Thu May 17 18:34:02 2018 +0200 + + BUG/MEDIUM: pollers: Use a global list for fd shared between threads. + + With the old model, any fd shared by multiple threads, such as listeners + or dns sockets, would only be updated on one threads, so that could lead + to missed event, or spurious wakeups. + To avoid this, add a global list for fd that are shared, and only remove + entries from this list when every thread as updated its poller. + This subtly changes the semantics of updt_fd_polling(), as it now unlocks + the FD_LOCK on exit. + + This is similar in spirit to commit 6b96f7289c2f401deef4bdc6e20792360807dde4 + (with the bugfix from c55b88ece616afe0b28dc81eb39bad37b5f9c33f) applied, + but had to be rewrote, because of the differences between 1.8 and master. + + This should only be applied to 1.8. 
+ +diff --git a/include/common/hathreads.h b/include/common/hathreads.h +index 325a869a..86db4d5c 100644 +--- a/include/common/hathreads.h ++++ b/include/common/hathreads.h +@@ -201,6 +201,8 @@ void thread_exit_sync(void); + int thread_no_sync(void); + int thread_need_sync(void); + ++extern unsigned long all_threads_mask; ++ + #if defined(DEBUG_THREAD) || defined(DEBUG_FULL) + + /* WARNING!!! if you update this enum, please also keep lock_label() up to date below */ +@@ -209,6 +211,7 @@ enum lock_label { + FDTAB_LOCK, + FDCACHE_LOCK, + FD_LOCK, ++ FD_UPDATE_LOCK, + POLL_LOCK, + TASK_RQ_LOCK, + TASK_WQ_LOCK, +@@ -330,6 +333,7 @@ static inline const char *lock_label(enum lock_label label) + case FDCACHE_LOCK: return "FDCACHE"; + case FD_LOCK: return "FD"; + case FDTAB_LOCK: return "FDTAB"; ++ case FD_UPDATE_LOCK: return "FD_UPDATE"; + case POLL_LOCK: return "POLL"; + case TASK_RQ_LOCK: return "TASK_RQ"; + case TASK_WQ_LOCK: return "TASK_WQ"; +diff --git a/include/proto/fd.h b/include/proto/fd.h +index bb91bb2c..b6199ccf 100644 +--- a/include/proto/fd.h ++++ b/include/proto/fd.h +@@ -43,6 +43,9 @@ extern THREAD_LOCAL int fd_nbupdt; // number of updates in the list + __decl_hathreads(extern HA_SPINLOCK_T __attribute__((aligned(64))) fdtab_lock); /* global lock to protect fdtab array */ + __decl_hathreads(extern HA_RWLOCK_T __attribute__((aligned(64))) fdcache_lock); /* global lock to protect fd_cache array */ + __decl_hathreads(extern HA_SPINLOCK_T __attribute__((aligned(64))) poll_lock); /* global lock to protect poll info */ ++__decl_hathreads(extern HA_SPINLOCK_T __attribute__((aligned(64))) fd_updt_lock); /* global lock to protect the update list */ ++ ++extern struct fdlist update_list; // Global update list + + /* Deletes an FD from the fdsets, and recomputes the maxfd limit. + * The file descriptor is also closed. +@@ -96,14 +99,70 @@ void fd_process_cached_events(); + + /* Mark fd as updated for polling and allocate an entry in the update list + * for this if it was not already there. This can be done at any time. ++ * This function expects the FD lock to be locked, and returns with the ++ * FD lock unlocked. + */ + static inline void updt_fd_polling(const int fd) + { +- if (fdtab[fd].update_mask & tid_bit) ++ if ((fdtab[fd].update_mask & fdtab[fd].thread_mask) == ++ fdtab[fd].thread_mask) { ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); + /* already scheduled for update */ + return; +- fdtab[fd].update_mask |= tid_bit; +- fd_updt[fd_nbupdt++] = fd; ++ } ++ if (fdtab[fd].thread_mask == tid_bit) { ++ fdtab[fd].update_mask |= tid_bit; ++ fd_updt[fd_nbupdt++] = fd; ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ } else { ++ /* This is ugly, but we can afford to unlock the FD lock ++ * before we acquire the fd_updt_lock, to prevent a ++ * lock order reversal, because this function is only called ++ * from fd_update_cache(), and all users of fd_update_cache() ++ * used to just unlock the fd lock just after, anyway. ++ */ ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ HA_SPIN_LOCK(FD_UPDATE_LOCK, &fd_updt_lock); ++ /* If update_mask is non-nul, then it's already in the list ++ * so we don't have to add it. 
++ */ ++ if (fdtab[fd].update_mask == 0) { ++ if (update_list.first == -1) { ++ update_list.first = update_list.last = fd; ++ fdtab[fd].update.next = fdtab[fd].update.prev = -1; ++ } else { ++ fdtab[update_list.last].update.next = fd; ++ fdtab[fd].update.prev = update_list.last; ++ fdtab[fd].update.next = -1; ++ update_list.last = fd; ++ } ++ } ++ fdtab[fd].update_mask |= fdtab[fd].thread_mask; ++ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock); ++ ++ } ++} ++ ++/* Called from the poller to acknoledge we read an entry from the global ++ * update list, to remove our bit from the update_mask, and remove it from ++ * the list if we were the last one. ++ */ ++/* Expects to be called with the FD lock and the FD update lock held */ ++static inline void done_update_polling(int fd) ++{ ++ fdtab[fd].update_mask &= ~tid_bit; ++ if ((fdtab[fd].update_mask & all_threads_mask) == 0) { ++ if (fdtab[fd].update.prev != -1) ++ fdtab[fdtab[fd].update.prev].update.next = ++ fdtab[fd].update.next; ++ else ++ update_list.first = fdtab[fd].update.next; ++ if (fdtab[fd].update.next != -1) ++ fdtab[fdtab[fd].update.next].update.prev = ++ fdtab[fd].update.prev; ++ else ++ update_list.last = fdtab[fd].update.prev; ++ } + } + + +@@ -175,13 +234,6 @@ static inline int fd_compute_new_polled_status(int state) + */ + static inline void fd_update_cache(int fd) + { +- /* 3 states for each direction require a polling update */ +- if ((fdtab[fd].state & (FD_EV_POLLED_R | FD_EV_ACTIVE_R)) == FD_EV_POLLED_R || +- (fdtab[fd].state & (FD_EV_POLLED_R | FD_EV_READY_R | FD_EV_ACTIVE_R)) == FD_EV_ACTIVE_R || +- (fdtab[fd].state & (FD_EV_POLLED_W | FD_EV_ACTIVE_W)) == FD_EV_POLLED_W || +- (fdtab[fd].state & (FD_EV_POLLED_W | FD_EV_READY_W | FD_EV_ACTIVE_W)) == FD_EV_ACTIVE_W) +- updt_fd_polling(fd); +- + /* only READY and ACTIVE states (the two with both flags set) require a cache entry */ + if (((fdtab[fd].state & (FD_EV_READY_R | FD_EV_ACTIVE_R)) == (FD_EV_READY_R | FD_EV_ACTIVE_R)) || + ((fdtab[fd].state & (FD_EV_READY_W | FD_EV_ACTIVE_W)) == (FD_EV_READY_W | FD_EV_ACTIVE_W))) { +@@ -190,6 +242,14 @@ static inline void fd_update_cache(int fd) + else { + fd_release_cache_entry(fd); + } ++ /* 3 states for each direction require a polling update */ ++ if ((fdtab[fd].state & (FD_EV_POLLED_R | FD_EV_ACTIVE_R)) == FD_EV_POLLED_R || ++ (fdtab[fd].state & (FD_EV_POLLED_R | FD_EV_READY_R | FD_EV_ACTIVE_R)) == FD_EV_ACTIVE_R || ++ (fdtab[fd].state & (FD_EV_POLLED_W | FD_EV_ACTIVE_W)) == FD_EV_POLLED_W || ++ (fdtab[fd].state & (FD_EV_POLLED_W | FD_EV_READY_W | FD_EV_ACTIVE_W)) == FD_EV_ACTIVE_W) ++ updt_fd_polling(fd); ++ else ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); + } + + /* +@@ -271,8 +331,9 @@ static inline void fd_stop_recv(int fd) + if (fd_recv_active(fd)) { + fdtab[fd].state &= ~FD_EV_ACTIVE_R; + fd_update_cache(fd); /* need an update entry to change the state */ +- } +- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ /* the FD lock is unlocked by fd_update_cache() */ ++ } else ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); + } + + /* Disable processing send events on fd */ +@@ -282,8 +343,9 @@ static inline void fd_stop_send(int fd) + if (fd_send_active(fd)) { + fdtab[fd].state &= ~FD_EV_ACTIVE_W; + fd_update_cache(fd); /* need an update entry to change the state */ +- } +- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ /* the FD lock is unlocked by fd_update_cache() */ ++ } else ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); + } + + /* Disable processing of events on fd for both directions. 
*/ +@@ -293,8 +355,9 @@ static inline void fd_stop_both(int fd) + if (fd_active(fd)) { + fdtab[fd].state &= ~FD_EV_ACTIVE_RW; + fd_update_cache(fd); /* need an update entry to change the state */ +- } +- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ /* the FD lock is unlocked by fd_update_cache() */ ++ } else ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); + } + + /* Report that FD cannot receive anymore without polling (EAGAIN detected). */ +@@ -304,8 +367,9 @@ static inline void fd_cant_recv(const int fd) + if (fd_recv_ready(fd)) { + fdtab[fd].state &= ~FD_EV_READY_R; + fd_update_cache(fd); /* need an update entry to change the state */ +- } +- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ /* the FD lock is unlocked by fd_update_cache() */ ++ } else ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); + } + + /* Report that FD can receive anymore without polling. */ +@@ -315,8 +379,9 @@ static inline void fd_may_recv(const int fd) + if (!fd_recv_ready(fd)) { + fdtab[fd].state |= FD_EV_READY_R; + fd_update_cache(fd); /* need an update entry to change the state */ +- } +- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ /* the FD lock is unlocked by fd_update_cache() */ ++ } else ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); + } + + /* Disable readiness when polled. This is useful to interrupt reading when it +@@ -330,8 +395,9 @@ static inline void fd_done_recv(const int fd) + if (fd_recv_polled(fd) && fd_recv_ready(fd)) { + fdtab[fd].state &= ~FD_EV_READY_R; + fd_update_cache(fd); /* need an update entry to change the state */ +- } +- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ /* the FD lock is unlocked by fd_update_cache() */ ++ } else ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); + } + + /* Report that FD cannot send anymore without polling (EAGAIN detected). */ +@@ -341,8 +407,9 @@ static inline void fd_cant_send(const int fd) + if (fd_send_ready(fd)) { + fdtab[fd].state &= ~FD_EV_READY_W; + fd_update_cache(fd); /* need an update entry to change the state */ +- } +- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ /* the FD lock is unlocked by fd_update_cache() */ ++ } else ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); + } + + /* Report that FD can send anymore without polling (EAGAIN detected). */ +@@ -352,8 +419,9 @@ static inline void fd_may_send(const int fd) + if (!fd_send_ready(fd)) { + fdtab[fd].state |= FD_EV_READY_W; + fd_update_cache(fd); /* need an update entry to change the state */ +- } +- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ /* the FD lock is unlocked by fd_update_cache() */ ++ } else ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); + } + + /* Prepare FD to try to receive */ +@@ -363,8 +431,9 @@ static inline void fd_want_recv(int fd) + if (!fd_recv_active(fd)) { + fdtab[fd].state |= FD_EV_ACTIVE_R; + fd_update_cache(fd); /* need an update entry to change the state */ +- } +- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ /* the FD lock is unlocked by fd_update_cache() */ ++ } else ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); + } + + /* Prepare FD to try to send */ +@@ -374,8 +443,9 @@ static inline void fd_want_send(int fd) + if (!fd_send_active(fd)) { + fdtab[fd].state |= FD_EV_ACTIVE_W; + fd_update_cache(fd); /* need an update entry to change the state */ +- } +- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ /* the FD lock is unlocked by fd_update_cache() */ ++ } else ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); + } + + /* Update events seen for FD and its state if needed. 
This should be called +diff --git a/include/types/fd.h b/include/types/fd.h +index 9f2c5fee..8e34c624 100644 +--- a/include/types/fd.h ++++ b/include/types/fd.h +@@ -90,11 +90,24 @@ enum fd_states { + */ + #define DEAD_FD_MAGIC 0xFDDEADFD + ++struct fdlist_entry { ++ int next; ++ int prev; ++} __attribute__ ((aligned(8))); ++ ++/* head of the fd list */ ++struct fdlist { ++ int first; ++ int last; ++} __attribute__ ((aligned(8))); ++ ++ + /* info about one given fd */ + struct fdtab { + __decl_hathreads(HA_SPINLOCK_T lock); + unsigned long thread_mask; /* mask of thread IDs authorized to process the task */ + unsigned long polled_mask; /* mask of thread IDs currently polling this fd */ ++ struct fdlist_entry update; /* Entry in the global update list */ + unsigned long update_mask; /* mask of thread IDs having an update for fd */ + void (*iocb)(int fd); /* I/O handler */ + void *owner; /* the connection or listener associated with this fd, NULL if closed */ +diff --git a/src/ev_epoll.c b/src/ev_epoll.c +index 124b8163..adc15acd 100644 +--- a/src/ev_epoll.c ++++ b/src/ev_epoll.c +@@ -59,13 +59,51 @@ REGPRM1 static void __fd_clo(int fd) + } + } + ++static void _update_fd(int fd) ++{ ++ int en, opcode; ++ ++ en = fdtab[fd].state; ++ ++ if (fdtab[fd].polled_mask & tid_bit) { ++ if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_POLLED_RW)) { ++ /* fd removed from poll list */ ++ opcode = EPOLL_CTL_DEL; ++ HA_ATOMIC_AND(&fdtab[fd].polled_mask, ~tid_bit); ++ } ++ else { ++ /* fd status changed */ ++ opcode = EPOLL_CTL_MOD; ++ } ++ } ++ else if ((fdtab[fd].thread_mask & tid_bit) && (en & FD_EV_POLLED_RW)) { ++ /* new fd in the poll list */ ++ opcode = EPOLL_CTL_ADD; ++ HA_ATOMIC_OR(&fdtab[fd].polled_mask, tid_bit); ++ } ++ else { ++ return; ++ } ++ ++ /* construct the epoll events based on new state */ ++ ev.events = 0; ++ if (en & FD_EV_POLLED_R) ++ ev.events |= EPOLLIN | EPOLLRDHUP; ++ ++ if (en & FD_EV_POLLED_W) ++ ev.events |= EPOLLOUT; ++ ++ ev.data.fd = fd; ++ epoll_ctl(epoll_fd[tid], opcode, fd, &ev); ++} ++ + /* + * Linux epoll() poller + */ + REGPRM2 static void _do_poll(struct poller *p, int exp) + { + int status, eo, en; +- int fd, opcode; ++ int fd; + int count; + int updt_idx; + int wait_time; +@@ -89,39 +127,31 @@ REGPRM2 static void _do_poll(struct poller *p, int exp) + en = fd_compute_new_polled_status(eo); + fdtab[fd].state = en; + HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); +- +- if (fdtab[fd].polled_mask & tid_bit) { +- if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_POLLED_RW)) { +- /* fd removed from poll list */ +- opcode = EPOLL_CTL_DEL; +- HA_ATOMIC_AND(&fdtab[fd].polled_mask, ~tid_bit); +- } +- else { +- /* fd status changed */ +- opcode = EPOLL_CTL_MOD; +- } +- } +- else if ((fdtab[fd].thread_mask & tid_bit) && (en & FD_EV_POLLED_RW)) { +- /* new fd in the poll list */ +- opcode = EPOLL_CTL_ADD; +- HA_ATOMIC_OR(&fdtab[fd].polled_mask, tid_bit); +- } ++ _update_fd(fd); ++ } ++ fd_nbupdt = 0; ++ /* Scan the global update list */ ++ HA_SPIN_LOCK(FD_UPDATE_LOCK, &fd_updt_lock); ++ for (fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) { ++ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); ++ if (fdtab[fd].update_mask & tid_bit) ++ done_update_polling(fd); + else { ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); + continue; + } ++ fdtab[fd].new = 0; + +- /* construct the epoll events based on new state */ +- ev.events = 0; +- if (en & FD_EV_POLLED_R) +- ev.events |= EPOLLIN | EPOLLRDHUP; +- +- if (en & FD_EV_POLLED_W) +- ev.events |= EPOLLOUT; ++ eo = fdtab[fd].state; 
++ en = fd_compute_new_polled_status(eo); ++ fdtab[fd].state = en; + +- ev.data.fd = fd; +- epoll_ctl(epoll_fd[tid], opcode, fd, &ev); ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ if (!fdtab[fd].owner) ++ continue; ++ _update_fd(fd); + } +- fd_nbupdt = 0; ++ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock); + + /* compute the epoll_wait() timeout */ + if (!exp) +@@ -208,8 +238,10 @@ static int init_epoll_per_thread() + * fd for this thread. Let's just mark them as updated, the poller will + * do the rest. + */ +- for (fd = 0; fd < maxfd; fd++) ++ for (fd = 0; fd < maxfd; fd++) { ++ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); + updt_fd_polling(fd); ++ } + + return 1; + fail_fd: +diff --git a/src/ev_kqueue.c b/src/ev_kqueue.c +index 8cd6dd84..642de8b3 100644 +--- a/src/ev_kqueue.c ++++ b/src/ev_kqueue.c +@@ -33,6 +33,41 @@ static int kqueue_fd[MAX_THREADS]; // per-thread kqueue_fd + static THREAD_LOCAL struct kevent *kev = NULL; + static struct kevent *kev_out = NULL; // Trash buffer for kevent() to write the eventlist in + ++static int _update_fd(int fd, int start) ++{ ++ int en; ++ int changes = start; ++ ++ en = fdtab[fd].state; ++ ++ if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_POLLED_RW)) { ++ if (!(fdtab[fd].polled_mask & tid_bit)) { ++ /* fd was not watched, it's still not */ ++ return 0; ++ } ++ /* fd totally removed from poll list */ ++ EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); ++ EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); ++ HA_ATOMIC_AND(&fdtab[fd].polled_mask, ~tid_bit); ++ } ++ else { ++ /* OK fd has to be monitored, it was either added or changed */ ++ ++ if (en & FD_EV_POLLED_R) ++ EV_SET(&kev[changes++], fd, EVFILT_READ, EV_ADD, 0, 0, NULL); ++ else if (fdtab[fd].polled_mask & tid_bit) ++ EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); ++ ++ if (en & FD_EV_POLLED_W) ++ EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL); ++ else if (fdtab[fd].polled_mask & tid_bit) ++ EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); ++ ++ HA_ATOMIC_OR(&fdtab[fd].polled_mask, tid_bit); ++ } ++ return changes; ++} ++ + /* + * kqueue() poller + */ +@@ -66,32 +101,32 @@ REGPRM2 static void _do_poll(struct poller *p, int exp) + fdtab[fd].state = en; + HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); + +- if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_POLLED_RW)) { +- if (!(fdtab[fd].polled_mask & tid_bit)) { +- /* fd was not watched, it's still not */ +- continue; +- } +- /* fd totally removed from poll list */ +- EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); +- EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); +- HA_ATOMIC_AND(&fdtab[fd].polled_mask, ~tid_bit); +- } +- else { +- /* OK fd has to be monitored, it was either added or changed */ ++ changes = _update_fd(fd, changes); ++ } + +- if (en & FD_EV_POLLED_R) +- EV_SET(&kev[changes++], fd, EVFILT_READ, EV_ADD, 0, 0, NULL); +- else if (fdtab[fd].polled_mask & tid_bit) +- EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); ++ /* Scan the global update list */ ++ HA_SPIN_LOCK(FD_UPDATE_LOCK, &fd_updt_lock); ++ for (fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) { ++ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); ++ if (fdtab[fd].update_mask & tid_bit) ++ done_update_polling(fd); ++ else { ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ continue; ++ } ++ fdtab[fd].new = 0; + +- if (en & FD_EV_POLLED_W) +- EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL); +- else if 
(fdtab[fd].polled_mask & tid_bit) +- EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); ++ eo = fdtab[fd].state; ++ en = fd_compute_new_polled_status(eo); ++ fdtab[fd].state = en; + +- HA_ATOMIC_OR(&fdtab[fd].polled_mask, tid_bit); +- } ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ if (!fdtab[fd].owner) ++ continue; ++ changes = _update_fd(fd, changes); + } ++ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock); ++ + if (changes) { + #ifdef EV_RECEIPT + kev[0].flags |= EV_RECEIPT; +@@ -189,8 +224,10 @@ static int init_kqueue_per_thread() + * fd for this thread. Let's just mark them as updated, the poller will + * do the rest. + */ +- for (fd = 0; fd < maxfd; fd++) ++ for (fd = 0; fd < maxfd; fd++) { ++ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); + updt_fd_polling(fd); ++ } + + return 1; + fail_fd: +diff --git a/src/ev_poll.c b/src/ev_poll.c +index b7cc0bb3..c913ced2 100644 +--- a/src/ev_poll.c ++++ b/src/ev_poll.c +@@ -104,6 +104,51 @@ REGPRM2 static void _do_poll(struct poller *p, int exp) + HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock); + } + } ++ HA_SPIN_LOCK(FD_UPDATE_LOCK, &fd_updt_lock); ++ for (fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) { ++ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); ++ if (fdtab[fd].update_mask & tid_bit) { ++ /* Cheat a bit, as the state is global to all pollers ++ * we don't need every thread ot take care of the ++ * update. ++ */ ++ fdtab[fd].update_mask &= ~all_threads_mask; ++ done_update_polling(fd); ++ } else { ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ continue; ++ } ++ ++ if (!fdtab[fd].owner) { ++ activity[tid].poll_drop++; ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ continue; ++ } ++ ++ fdtab[fd].new = 0; ++ ++ eo = fdtab[fd].state; ++ en = fd_compute_new_polled_status(eo); ++ fdtab[fd].state = en; ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ ++ if ((eo ^ en) & FD_EV_POLLED_RW) { ++ /* poll status changed, update the lists */ ++ HA_SPIN_LOCK(POLL_LOCK, &poll_lock); ++ if ((eo & ~en) & FD_EV_POLLED_R) ++ hap_fd_clr(fd, fd_evts[DIR_RD]); ++ else if ((en & ~eo) & FD_EV_POLLED_R) ++ hap_fd_set(fd, fd_evts[DIR_RD]); ++ ++ if ((eo & ~en) & FD_EV_POLLED_W) ++ hap_fd_clr(fd, fd_evts[DIR_WR]); ++ else if ((en & ~eo) & FD_EV_POLLED_W) ++ hap_fd_set(fd, fd_evts[DIR_WR]); ++ HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock); ++ } ++ ++ } ++ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock); + fd_nbupdt = 0; + + nbfd = 0; +diff --git a/src/ev_select.c b/src/ev_select.c +index 5f3486ed..bde923ea 100644 +--- a/src/ev_select.c ++++ b/src/ev_select.c +@@ -70,7 +70,42 @@ REGPRM2 static void _do_poll(struct poller *p, int exp) + en = fd_compute_new_polled_status(eo); + fdtab[fd].state = en; + HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ if ((eo ^ en) & FD_EV_POLLED_RW) { ++ /* poll status changed, update the lists */ ++ HA_SPIN_LOCK(POLL_LOCK, &poll_lock); ++ if ((eo & ~en) & FD_EV_POLLED_R) ++ FD_CLR(fd, fd_evts[DIR_RD]); ++ else if ((en & ~eo) & FD_EV_POLLED_R) ++ FD_SET(fd, fd_evts[DIR_RD]); ++ ++ if ((eo & ~en) & FD_EV_POLLED_W) ++ FD_CLR(fd, fd_evts[DIR_WR]); ++ else if ((en & ~eo) & FD_EV_POLLED_W) ++ FD_SET(fd, fd_evts[DIR_WR]); ++ HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock); ++ } ++ } ++ HA_SPIN_LOCK(FD_UPDATE_LOCK, &fd_updt_lock); ++ for (fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) { ++ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); ++ if (fdtab[fd].update_mask & tid_bit) { ++ /* Cheat a bit, as the state is global to all pollers ++ * we don't need every thread ot take care of the ++ * update. 
++ */ ++ fdtab[fd].update_mask &= ~all_threads_mask; ++ done_update_polling(fd); ++ } else { ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); ++ continue; ++ } + ++ fdtab[fd].new = 0; ++ ++ eo = fdtab[fd].state; ++ en = fd_compute_new_polled_status(eo); ++ fdtab[fd].state = en; ++ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); + if ((eo ^ en) & FD_EV_POLLED_RW) { + /* poll status changed, update the lists */ + HA_SPIN_LOCK(POLL_LOCK, &poll_lock); +@@ -85,7 +120,9 @@ REGPRM2 static void _do_poll(struct poller *p, int exp) + FD_SET(fd, fd_evts[DIR_WR]); + HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock); + } ++ + } ++ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock); + fd_nbupdt = 0; + + /* let's restore fdset state */ +diff --git a/src/fd.c b/src/fd.c +index b64130ed..a134e93e 100644 +--- a/src/fd.c ++++ b/src/fd.c +@@ -175,9 +175,12 @@ unsigned long fd_cache_mask = 0; // Mask of threads with events in the cache + THREAD_LOCAL int *fd_updt = NULL; // FD updates list + THREAD_LOCAL int fd_nbupdt = 0; // number of updates in the list + ++struct fdlist update_list; // Global update list + __decl_hathreads(HA_SPINLOCK_T fdtab_lock); /* global lock to protect fdtab array */ + __decl_hathreads(HA_RWLOCK_T fdcache_lock); /* global lock to protect fd_cache array */ + __decl_hathreads(HA_SPINLOCK_T poll_lock); /* global lock to protect poll info */ ++__decl_hathreads(HA_SPINLOCK_T) fd_updt_lock; /* global lock to protect the update list */ ++ + + /* Deletes an FD from the fdsets, and recomputes the maxfd limit. + * The file descriptor is also closed. +@@ -341,6 +344,9 @@ int init_pollers() + HA_SPIN_INIT(&fdtab_lock); + HA_RWLOCK_INIT(&fdcache_lock); + HA_SPIN_INIT(&poll_lock); ++ HA_SPIN_INIT(&fd_updt_lock); ++ update_list.first = update_list.last = -1; ++ + do { + bp = NULL; + for (p = 0; p < nbpollers; p++) +diff --git a/src/hathreads.c b/src/hathreads.c +index 944a0d5b..66ed482a 100644 +--- a/src/hathreads.c ++++ b/src/hathreads.c +@@ -31,7 +31,7 @@ void thread_sync_io_handler(int fd) + static HA_SPINLOCK_T sync_lock; + static int threads_sync_pipe[2]; + static unsigned long threads_want_sync = 0; +-static unsigned long all_threads_mask = 0; ++unsigned long all_threads_mask = 0; + + #if defined(DEBUG_THREAD) || defined(DEBUG_FULL) + struct lock_stat lock_stats[LOCK_LABELS]; diff --git a/net/haproxy/patches/0018-BUG-MEDIUM-ssl-properly-protect-SSL-cert-generation.patch b/net/haproxy/patches/0018-BUG-MEDIUM-ssl-properly-protect-SSL-cert-generation.patch new file mode 100644 index 000000000..b94bce939 --- /dev/null +++ b/net/haproxy/patches/0018-BUG-MEDIUM-ssl-properly-protect-SSL-cert-generation.patch @@ -0,0 +1,45 @@ +commit f571613244e4c02ca7aada30c89a6244d09d58d4 +Author: Willy Tarreau +Date: Thu May 17 10:56:47 2018 +0200 + + BUG/MEDIUM: ssl: properly protect SSL cert generation + + Commit 821bb9b ("MAJOR: threads/ssl: Make SSL part thread-safe") added + insufficient locking to the cert lookup and generation code : it uses + lru64_lookup(), which will automatically remove and add a list element + to the LRU list. It cannot be simply read-locked. + + A long-term improvement should consist in using a lockless mechanism + in lru64_lookup() to safely move the list element at the head. For now + let's simply use a write lock during the lookup. The effect will be + minimal since it's used only in conjunction with automatically generated + certificates, which are much more expensive and rarely used. + + This fix must be backported to 1.8. 
+ + (cherry picked from commit 03f4ec47d9ffff629b07dcba9f0f134a7c7e44b2) + Signed-off-by: William Lallemand + +diff --git a/src/ssl_sock.c b/src/ssl_sock.c +index 1196d111..9fb2bb15 100644 +--- a/src/ssl_sock.c ++++ b/src/ssl_sock.c +@@ -1812,15 +1812,15 @@ ssl_sock_assign_generated_cert(unsigned int key, struct bind_conf *bind_conf, SS + struct lru64 *lru = NULL; + + if (ssl_ctx_lru_tree) { +- HA_RWLOCK_RDLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); ++ HA_RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); + lru = lru64_lookup(key, ssl_ctx_lru_tree, bind_conf->ca_sign_cert, 0); + if (lru && lru->domain) { + if (ssl) + SSL_set_SSL_CTX(ssl, (SSL_CTX *)lru->data); +- HA_RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); ++ HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); + return (SSL_CTX *)lru->data; + } +- HA_RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); ++ HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); + } + return NULL; + } From 4ae1c3307adba7eeb5c9f90462cd326dd285e96b Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Sat, 19 May 2018 10:14:50 +0200 Subject: [PATCH 13/13] haproxy: Update HAProxy to v1.8.9 - Update haproxy download URL and hash - Removed all obsolete patches - Added logic to Makefile to only append the patch-version to the HA-Proxy version if we actually applied any patches (PKG_RELEASE!=00) Signed-off-by: Christian Lachner --- net/haproxy/Makefile | 12 +- net/haproxy/get-latest-patches.sh | 2 +- ...issing-HA_SPIN_INIT-in-pat_ref_newid.patch | 26 - ...-trying-to-read-from-a-closed-socket.patch | 87 --- ...idle-Ti-is-not-set-for-some-requests.patch | 50 -- ...gmentation-fault-if-a-Lua-task-exits.patch | 48 -- ...nce-of-CONNECT-and-or-content-length.patch | 147 ---- ...-support-for-chunked-encoded-uploads.patch | 164 ---- ...s-tasks-sticky-to-the-current-thread.patch | 36 - ...ig-disable-http-reuse-on-TCP-proxies.patch | 31 - ...lth-computation-for-flapping-servers.patch | 42 -- ...-sync-point-for-more-than-32-threads.patch | 48 -- ...tasks-to-sleep-when-waiting-for-data.patch | 31 - ...documentation-re-servers-array-table.patch | 252 ------- ...nce-to-the-last-ref_elt-being-dumped.patch | 31 - ...-free-a-task-that-is-about-to-be-run.patch | 128 ---- ...chedule-socket-task-upon-lua-connect.patch | 48 -- ...e-large-proxy-IDs-can-be-represented.patch | 38 - ...t-always-abort-transfers-on-CF_SHUTR.patch | 70 -- ...l-list-for-fd-shared-between-threads.patch | 709 ------------------ ...properly-protect-SSL-cert-generation.patch | 45 -- 21 files changed, 9 insertions(+), 2036 deletions(-) delete mode 100644 net/haproxy/patches/0000-BUG-MINOR-pattern-Add-a-missing-HA_SPIN_INIT-in-pat_ref_newid.patch delete mode 100644 net/haproxy/patches/0001-BUG-MAJOR-channel-Fix-crash-when-trying-to-read-from-a-closed-socket.patch delete mode 100644 net/haproxy/patches/0002-BUG-MINOR-log-t_idle-Ti-is-not-set-for-some-requests.patch delete mode 100644 net/haproxy/patches/0003-BUG-MEDIUM-lua-Fix-segmentation-fault-if-a-Lua-task-exits.patch delete mode 100644 net/haproxy/patches/0004-MINOR-h2-detect-presence-of-CONNECT-and-or-content-length.patch delete mode 100644 net/haproxy/patches/0005-BUG-MEDIUM-h2-implement-missing-support-for-chunked-encoded-uploads.patch delete mode 100644 net/haproxy/patches/0006-BUG-MINOR-lua-threads-Make-luas-tasks-sticky-to-the-current-thread.patch delete mode 100644 net/haproxy/patches/0007-BUG-MINOR-config-disable-http-reuse-on-TCP-proxies.patch delete mode 100644 
net/haproxy/patches/0008-BUG-MINOR-checks-Fix-check--health-computation-for-flapping-servers.patch delete mode 100644 net/haproxy/patches/0009-BUG-MEDIUM-threads-Fix-the-sync-point-for-more-than-32-threads.patch delete mode 100644 net/haproxy/patches/0010-BUG-MINOR-lua-Put-tasks-to-sleep-when-waiting-for-data.patch delete mode 100644 net/haproxy/patches/0011-DOC-MINOR-clean-up-LUA-documentation-re-servers-array-table.patch delete mode 100644 net/haproxy/patches/0012-BUG-MINOR-map-correctly-track-reference-to-the-last-ref_elt-being-dumped.patch delete mode 100644 net/haproxy/patches/0013-BUG-MEDIUM-task-Dont-free-a-task-that-is-about-to-be-run.patch delete mode 100644 net/haproxy/patches/0014-BUG-MINOR-lua-schedule-socket-task-upon-lua-connect.patch delete mode 100644 net/haproxy/patches/0015-BUG-MINOR-lua-ensure-large-proxy-IDs-can-be-represented.patch delete mode 100644 net/haproxy/patches/0016-BUG-MEDIUM-http-dont-always-abort-transfers-on-CF_SHUTR.patch delete mode 100644 net/haproxy/patches/0017-BUG-MEDIUM-pollers-Use-a-global-list-for-fd-shared-between-threads.patch delete mode 100644 net/haproxy/patches/0018-BUG-MEDIUM-ssl-properly-protect-SSL-cert-generation.patch diff --git a/net/haproxy/Makefile b/net/haproxy/Makefile index fb213260c..21a2d1260 100644 --- a/net/haproxy/Makefile +++ b/net/haproxy/Makefile @@ -9,17 +9,21 @@ include $(TOPDIR)/rules.mk PKG_NAME:=haproxy -PKG_VERSION:=1.8.8 -PKG_RELEASE:=05 +PKG_VERSION:=1.8.9 +PKG_RELEASE:=00 PKG_SOURCE:=haproxy-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://www.haproxy.org/download/1.8/src/ -PKG_HASH:=bcc05ab824bd2f89b8b21ac05459c0a0a0e02247b57ffe441d52cfe771daea92 +PKG_HASH:=436b77927cd85bcd4c2cb3cbf7fb539a5362d9686fdcfa34f37550ca1f5db102 PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION) PKG_LICENSE:=GPL-2.0 MAINTAINER:=Thomas Heil +ifneq ($(PKG_RELEASE),00) + BUILD_VERSION:=-patch$(PKG_RELEASE) +endif + include $(INCLUDE_DIR)/package.mk define Package/haproxy/Default @@ -143,7 +147,7 @@ define Build/Compile SMALL_OPTS="-DBUFSIZE=16384 -DMAXREWRITE=1030 -DSYSTEM_MAXCONN=165530 " \ USE_LINUX_TPROXY=1 USE_LINUX_SPLICE=1 USE_TFO=1 \ USE_ZLIB=yes USE_PCRE=1 USE_PCRE_JIT=1 USE_GETADDRINFO=1 \ - VERSION="$(PKG_VERSION)-patch$(PKG_RELEASE)" \ + VERSION="$(PKG_VERSION)$(BUILD_VERSION)" \ $(ADDON) \ CFLAGS="$(TARGET_CFLAGS)" \ LD="$(TARGET_CC)" \ diff --git a/net/haproxy/get-latest-patches.sh b/net/haproxy/get-latest-patches.sh index b74107f21..98ce2c79b 100755 --- a/net/haproxy/get-latest-patches.sh +++ b/net/haproxy/get-latest-patches.sh @@ -1,7 +1,7 @@ #!/bin/bash CLONEURL=http://git.haproxy.org/git/haproxy-1.8.git -BASE_TAG=v1.8.8 +BASE_TAG=v1.8.9 TMP_REPODIR=tmprepo PATCHESDIR=patches diff --git a/net/haproxy/patches/0000-BUG-MINOR-pattern-Add-a-missing-HA_SPIN_INIT-in-pat_ref_newid.patch b/net/haproxy/patches/0000-BUG-MINOR-pattern-Add-a-missing-HA_SPIN_INIT-in-pat_ref_newid.patch deleted file mode 100644 index eb3a18a10..000000000 --- a/net/haproxy/patches/0000-BUG-MINOR-pattern-Add-a-missing-HA_SPIN_INIT-in-pat_ref_newid.patch +++ /dev/null @@ -1,26 +0,0 @@ -commit 6c9efc8219e35f4eb17e94b364f4c371cfb56cca -Author: Aurélien Nephtali -Date: Thu Apr 19 16:56:07 2018 +0200 - - BUG/MINOR: pattern: Add a missing HA_SPIN_INIT() in pat_ref_newid() - - pat_ref_newid() is lacking a spinlock init. It was probably forgotten - in b5997f740b ("MAJOR: threads/map: Make acls/maps thread safe"). 
- - Signed-off-by: Aurélien Nephtali - (cherry picked from commit 564d15a71ecb3ae3372767866335cfbc068c4b48) - Signed-off-by: Christopher Faulet - -diff --git a/src/pattern.c b/src/pattern.c -index fe672f12..2eb82650 100644 ---- a/src/pattern.c -+++ b/src/pattern.c -@@ -1906,7 +1906,7 @@ struct pat_ref *pat_ref_newid(int unique_id, const char *display, unsigned int f - ref->unique_id = unique_id; - LIST_INIT(&ref->head); - LIST_INIT(&ref->pat); -- -+ HA_SPIN_INIT(&ref->lock); - LIST_ADDQ(&pattern_reference, &ref->list); - - return ref; diff --git a/net/haproxy/patches/0001-BUG-MAJOR-channel-Fix-crash-when-trying-to-read-from-a-closed-socket.patch b/net/haproxy/patches/0001-BUG-MAJOR-channel-Fix-crash-when-trying-to-read-from-a-closed-socket.patch deleted file mode 100644 index 606c3bf09..000000000 --- a/net/haproxy/patches/0001-BUG-MAJOR-channel-Fix-crash-when-trying-to-read-from-a-closed-socket.patch +++ /dev/null @@ -1,87 +0,0 @@ -commit e0f6d4a4e8696140d1fcff812fb287d534d702e9 -Author: Tim Duesterhus -Date: Tue Apr 24 19:20:43 2018 +0200 - - BUG/MAJOR: channel: Fix crash when trying to read from a closed socket - - When haproxy is compiled using GCC <= 3.x or >= 5.x the `unlikely` - macro performs a comparison with zero: `(x) != 0`, thus returning - either 0 or 1. - - In `int co_getline_nc()` this macro was accidentally applied to - the variable `retcode` itself, instead of the result of the - comparison `retcode <= 0`. As a result any negative `retcode` - is converted to `1` for purposes of the comparison. - Thus never taking the branch (and exiting the function) for - negative values. - - This in turn leads to reads of uninitialized memory in the for-loop - below: - - ==12141== Conditional jump or move depends on uninitialised value(s) - ==12141== at 0x4EB6B4: co_getline_nc (channel.c:346) - ==12141== by 0x421CA4: hlua_socket_receive_yield (hlua.c:1713) - ==12141== by 0x421F6F: hlua_socket_receive (hlua.c:1896) - ==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x529B497: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x529711A: lua_pcallk (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x52ABDF0: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x529A9F1: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x529B523: lua_resume (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== - ==12141== Use of uninitialised value of size 8 - ==12141== at 0x4EB6B9: co_getline_nc (channel.c:346) - ==12141== by 0x421CA4: hlua_socket_receive_yield (hlua.c:1713) - ==12141== by 0x421F6F: hlua_socket_receive (hlua.c:1896) - ==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x529B497: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x529711A: lua_pcallk (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x52ABDF0: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x529A9F1: ??? 
(in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x529B523: lua_resume (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== - ==12141== Invalid read of size 1 - ==12141== at 0x4EB6B9: co_getline_nc (channel.c:346) - ==12141== by 0x421CA4: hlua_socket_receive_yield (hlua.c:1713) - ==12141== by 0x421F6F: hlua_socket_receive (hlua.c:1896) - ==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x529B497: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x529711A: lua_pcallk (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x52ABDF0: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x529A9F1: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== by 0x529B523: lua_resume (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==12141== Address 0x8637171e928bb500 is not stack'd, malloc'd or (recently) free'd - - Fix this bug by correctly applying the `unlikely` macro to the result of the comparison. - - This bug exists as of commit ca16b038132444dea06e6d83953034128a812bce - which is the first commit adding this function. - - v1.6-dev1 is the first tag containing this commit, the fix should - be backported to haproxy 1.6 and newer. - - (cherry picked from commit 45be38c9c7ba2b20806f2b887876db4fb5b9457c) - Signed-off-by: Christopher Faulet - -diff --git a/src/channel.c b/src/channel.c -index bd5c4de0..3770502c 100644 ---- a/src/channel.c -+++ b/src/channel.c -@@ -340,7 +340,7 @@ int co_getline_nc(const struct channel *chn, - int l; - - retcode = co_getblk_nc(chn, blk1, len1, blk2, len2); -- if (unlikely(retcode) <= 0) -+ if (unlikely(retcode <= 0)) - return retcode; - - for (l = 0; l < *len1 && (*blk1)[l] != '\n'; l++); diff --git a/net/haproxy/patches/0002-BUG-MINOR-log-t_idle-Ti-is-not-set-for-some-requests.patch b/net/haproxy/patches/0002-BUG-MINOR-log-t_idle-Ti-is-not-set-for-some-requests.patch deleted file mode 100644 index 9fffb9597..000000000 --- a/net/haproxy/patches/0002-BUG-MINOR-log-t_idle-Ti-is-not-set-for-some-requests.patch +++ /dev/null @@ -1,50 +0,0 @@ -commit 0e645ba57ddff9163a3d9b5626f189e974e671bd -Author: Rian McGuire -Date: Tue Apr 24 11:19:21 2018 -0300 - - BUG/MINOR: log: t_idle (%Ti) is not set for some requests - - If TCP content inspection is used, msg_state can be >= HTTP_MSG_ERROR - the first time http_wait_for_request is called. t_idle was being left - unset in that case. - - In the example below : - stick-table type string len 64 size 100k expire 60s - tcp-request inspect-delay 1s - tcp-request content track-sc1 hdr(X-Session) - - %Ti will always be -1, because the msg_state is already at HTTP_MSG_BODY - when http_wait_for_request is called for the first time. - - This patch should backported to 1.8 and 1.7. 
- - (cherry picked from commit 89fcb7d929283e904cabad58de495d62fc753da2) - Signed-off-by: Christopher Faulet - -diff --git a/src/proto_http.c b/src/proto_http.c -index b38dd84f..4c18a27c 100644 ---- a/src/proto_http.c -+++ b/src/proto_http.c -@@ -1618,18 +1618,16 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit) - /* we're speaking HTTP here, so let's speak HTTP to the client */ - s->srv_error = http_return_srv_error; - -+ /* If there is data available for analysis, log the end of the idle time. */ -+ if (buffer_not_empty(req->buf) && s->logs.t_idle == -1) -+ s->logs.t_idle = tv_ms_elapsed(&s->logs.tv_accept, &now) - s->logs.t_handshake; -+ - /* There's a protected area at the end of the buffer for rewriting - * purposes. We don't want to start to parse the request if the - * protected area is affected, because we may have to move processed - * data later, which is much more complicated. - */ - if (buffer_not_empty(req->buf) && msg->msg_state < HTTP_MSG_ERROR) { -- -- /* This point is executed when some data is avalaible for analysis, -- * so we log the end of the idle time. */ -- if (s->logs.t_idle == -1) -- s->logs.t_idle = tv_ms_elapsed(&s->logs.tv_accept, &now) - s->logs.t_handshake; -- - if (txn->flags & TX_NOT_FIRST) { - if (unlikely(!channel_is_rewritable(req))) { - if (req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) diff --git a/net/haproxy/patches/0003-BUG-MEDIUM-lua-Fix-segmentation-fault-if-a-Lua-task-exits.patch b/net/haproxy/patches/0003-BUG-MEDIUM-lua-Fix-segmentation-fault-if-a-Lua-task-exits.patch deleted file mode 100644 index 2a52c7cbd..000000000 --- a/net/haproxy/patches/0003-BUG-MEDIUM-lua-Fix-segmentation-fault-if-a-Lua-task-exits.patch +++ /dev/null @@ -1,48 +0,0 @@ -commit 17f3e16826e5b1a3f79b7421d69bb85be09a4ad9 -Author: Tim Duesterhus -Date: Tue Apr 24 13:56:01 2018 +0200 - - BUG/MEDIUM: lua: Fix segmentation fault if a Lua task exits - - PiBa-NL reported that haproxy crashes with a segmentation fault - if a function registered using `core.register_task` returns. - - An example Lua script that reproduces the bug is: - - mytask = function() - core.Info("Stopping task") - end - core.register_task(mytask) - - The Valgrind output is as follows: - - ==6759== Process terminating with default action of signal 11 (SIGSEGV) - ==6759== Access not within mapped region at address 0x20 - ==6759== at 0x5B60AA9: lua_sethook (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0) - ==6759== by 0x430264: hlua_ctx_resume (hlua.c:1009) - ==6759== by 0x43BB68: hlua_process_task (hlua.c:5525) - ==6759== by 0x4FED0A: process_runnable_tasks (task.c:231) - ==6759== by 0x4B2256: run_poll_loop (haproxy.c:2397) - ==6759== by 0x4B2256: run_thread_poll_loop (haproxy.c:2459) - ==6759== by 0x41A7E4: main (haproxy.c:3049) - - Add the missing `task = NULL` for the `HLUA_E_OK` case. The error cases - have been fixed as of 253e53e661c49fb9723535319cf511152bf09bc7 which - first was included in haproxy v1.8-dev3. This bugfix should be backported - to haproxy 1.8. - - (cherry picked from commit cd235c60425dbe66c9015a357369afacc4880211) - Signed-off-by: Christopher Faulet - -diff --git a/src/hlua.c b/src/hlua.c -index 4e759c7c..d4b7ce91 100644 ---- a/src/hlua.c -+++ b/src/hlua.c -@@ -5528,6 +5528,7 @@ static struct task *hlua_process_task(struct task *task) - hlua_ctx_destroy(hlua); - task_delete(task); - task_free(task); -+ task = NULL; - break; - - case HLUA_E_AGAIN: /* co process or timeout wake me later. 
*/ diff --git a/net/haproxy/patches/0004-MINOR-h2-detect-presence-of-CONNECT-and-or-content-length.patch b/net/haproxy/patches/0004-MINOR-h2-detect-presence-of-CONNECT-and-or-content-length.patch deleted file mode 100644 index 4369ff04a..000000000 --- a/net/haproxy/patches/0004-MINOR-h2-detect-presence-of-CONNECT-and-or-content-length.patch +++ /dev/null @@ -1,147 +0,0 @@ -commit a8bcc7dd3fe5aa615f21e795375ff9225f004498 -Author: Willy Tarreau -Date: Wed Apr 25 18:13:58 2018 +0200 - - MINOR: h2: detect presence of CONNECT and/or content-length - - We'll need this in order to support uploading chunks. The h2 to h1 - converter checks for the presence of the content-length header field - as well as the CONNECT method and returns these information to the - caller. The caller indicates whether or not a body is detected for - the message (presence of END_STREAM or not). No transfer-encoding - header is emitted yet. - - (cherry picked from commit 174b06a572ef141f15d8b7ea64eb6b34ec4c9af1) - Signed-off-by: Christopher Faulet - -diff --git a/include/common/h2.h b/include/common/h2.h -index 65c5ab1c..576ed105 100644 ---- a/include/common/h2.h -+++ b/include/common/h2.h -@@ -145,9 +145,15 @@ enum h2_err { - "\x0d\x0a\x53\x4d\x0d\x0a\x0d\x0a" - - -+/* some flags related to protocol parsing */ -+#define H2_MSGF_BODY 0x0001 // a body is present -+#define H2_MSGF_BODY_CL 0x0002 // content-length is present -+#define H2_MSGF_BODY_TUNNEL 0x0004 // a tunnel is in use (CONNECT) -+ -+ - /* various protocol processing functions */ - --int h2_make_h1_request(struct http_hdr *list, char *out, int osize); -+int h2_make_h1_request(struct http_hdr *list, char *out, int osize, unsigned int *msgf); - - /* - * Some helpful debugging functions. -diff --git a/src/h2.c b/src/h2.c -index 43ed7f3c..7d9ddd50 100644 ---- a/src/h2.c -+++ b/src/h2.c -@@ -36,9 +36,10 @@ - * stored in . indicates what was found so far. This should be - * called once at the detection of the first general header field or at the end - * of the request if no general header field was found yet. Returns 0 on success -- * or a negative error code on failure. -+ * or a negative error code on failure. Upon success, is updated with a -+ * few H2_MSGF_* flags indicating what was found while parsing. - */ --static int h2_prepare_h1_reqline(uint32_t fields, struct ist *phdr, char **ptr, char *end) -+static int h2_prepare_h1_reqline(uint32_t fields, struct ist *phdr, char **ptr, char *end, unsigned int *msgf) - { - char *out = *ptr; - int uri_idx = H2_PHDR_IDX_PATH; -@@ -62,6 +63,7 @@ static int h2_prepare_h1_reqline(uint32_t fields, struct ist *phdr, char **ptr, - } - // otherwise OK ; let's use the authority instead of the URI - uri_idx = H2_PHDR_IDX_AUTH; -+ *msgf |= H2_MSGF_BODY_TUNNEL; - } - else if ((fields & (H2_PHDR_FND_METH|H2_PHDR_FND_SCHM|H2_PHDR_FND_PATH)) != - (H2_PHDR_FND_METH|H2_PHDR_FND_SCHM|H2_PHDR_FND_PATH)) { -@@ -113,6 +115,10 @@ static int h2_prepare_h1_reqline(uint32_t fields, struct ist *phdr, char **ptr, - * for a max of bytes, and the amount of bytes emitted is returned. In - * case of error, a negative error code is returned. - * -+ * Upon success, is filled with a few H2_MSGF_* flags indicating what -+ * was found while parsing. The caller must set it to zero in or H2_MSGF_BODY -+ * if a body is detected (!ES). 
-+ * - * The headers list must be composed of : - * - n.name != NULL, n.len > 0 : literal header name - * - n.name == NULL, n.len > 0 : indexed pseudo header name number -@@ -124,7 +130,7 @@ static int h2_prepare_h1_reqline(uint32_t fields, struct ist *phdr, char **ptr, - * The Cookie header will be reassembled at the end, and for this, the - * will be used to create a linked list, so its contents may be destroyed. - */ --int h2_make_h1_request(struct http_hdr *list, char *out, int osize) -+int h2_make_h1_request(struct http_hdr *list, char *out, int osize, unsigned int *msgf) - { - struct ist phdr_val[H2_PHDR_NUM_ENTRIES]; - char *out_end = out + osize; -@@ -176,7 +182,7 @@ int h2_make_h1_request(struct http_hdr *list, char *out, int osize) - /* regular header field in (name,value) */ - if (!(fields & H2_PHDR_FND_NONE)) { - /* no more pseudo-headers, time to build the request line */ -- ret = h2_prepare_h1_reqline(fields, phdr_val, &out, out_end); -+ ret = h2_prepare_h1_reqline(fields, phdr_val, &out, out_end, msgf); - if (ret != 0) - goto leave; - fields |= H2_PHDR_FND_NONE; -@@ -185,6 +191,10 @@ int h2_make_h1_request(struct http_hdr *list, char *out, int osize) - if (isteq(list[idx].n, ist("host"))) - fields |= H2_PHDR_FND_HOST; - -+ if ((*msgf & (H2_MSGF_BODY|H2_MSGF_BODY_TUNNEL|H2_MSGF_BODY_CL)) == H2_MSGF_BODY && -+ isteq(list[idx].n, ist("content-length"))) -+ *msgf |= H2_MSGF_BODY_CL; -+ - /* these ones are forbidden in requests (RFC7540#8.1.2.2) */ - if (isteq(list[idx].n, ist("connection")) || - isteq(list[idx].n, ist("proxy-connection")) || -@@ -232,7 +242,7 @@ int h2_make_h1_request(struct http_hdr *list, char *out, int osize) - - /* Let's dump the request now if not yet emitted. */ - if (!(fields & H2_PHDR_FND_NONE)) { -- ret = h2_prepare_h1_reqline(fields, phdr_val, &out, out_end); -+ ret = h2_prepare_h1_reqline(fields, phdr_val, &out, out_end, msgf); - if (ret != 0) - goto leave; - } -diff --git a/src/mux_h2.c b/src/mux_h2.c -index 4fde7fcc..82dd414a 100644 ---- a/src/mux_h2.c -+++ b/src/mux_h2.c -@@ -2626,6 +2626,7 @@ static int h2_frt_decode_headers(struct h2s *h2s, struct buffer *buf, int count) - struct chunk *tmp = get_trash_chunk(); - struct http_hdr list[MAX_HTTP_HDR * 2]; - struct chunk *copy = NULL; -+ unsigned int msgf; - int flen = h2c->dfl; - int outlen = 0; - int wrap; -@@ -2727,13 +2728,22 @@ static int h2_frt_decode_headers(struct h2s *h2s, struct buffer *buf, int count) - } - - /* OK now we have our header list in */ -- outlen = h2_make_h1_request(list, bi_end(buf), try); -+ msgf = (h2c->dff & H2_F_DATA_END_STREAM) ? 
0 : H2_MSGF_BODY; -+ outlen = h2_make_h1_request(list, bi_end(buf), try, &msgf); - - if (outlen < 0) { - h2c_error(h2c, H2_ERR_COMPRESSION_ERROR); - goto fail; - } - -+ if (msgf & H2_MSGF_BODY) { -+ /* a payload is present */ -+ if (msgf & H2_MSGF_BODY_CL) -+ h2s->flags |= H2_SF_DATA_CLEN; -+ else if (!(msgf & H2_MSGF_BODY_TUNNEL)) -+ h2s->flags |= H2_SF_DATA_CHNK; -+ } -+ - /* now consume the input data */ - bi_del(h2c->dbuf, h2c->dfl); - h2c->st0 = H2_CS_FRAME_H; diff --git a/net/haproxy/patches/0005-BUG-MEDIUM-h2-implement-missing-support-for-chunked-encoded-uploads.patch b/net/haproxy/patches/0005-BUG-MEDIUM-h2-implement-missing-support-for-chunked-encoded-uploads.patch deleted file mode 100644 index f2dc3e6f3..000000000 --- a/net/haproxy/patches/0005-BUG-MEDIUM-h2-implement-missing-support-for-chunked-encoded-uploads.patch +++ /dev/null @@ -1,164 +0,0 @@ -commit 05657bd24ebaf20e5c508a435be9a0830591f033 -Author: Willy Tarreau -Date: Wed Apr 25 20:44:22 2018 +0200 - - BUG/MEDIUM: h2: implement missing support for chunked encoded uploads - - Upload requests not carrying a content-length nor tunnelling data must - be sent chunked-encoded over HTTP/1. The code was planned but for some - reason forgotten during the implementation, leading to such payloads to - be sent as tunnelled data. - - Browsers always emit a content length in uploads so this problem doesn't - happen for most sites. However some applications may send data frames - after a request without indicating it earlier. - - The only way to detect that a client will need to send data is that the - HEADERS frame doesn't hold the ES bit. In this case it's wise to look - for the content-length header. If it's not there, either we're in tunnel - (CONNECT method) or chunked-encoding (other methods). - - This patch implements this. - - The following request is sent using content-length : - - curl --http2 -sk https://127.0.0.1:4443/s2 -XPOST -T /large/file - - and these ones using chunked-encoding : - - curl --http2 -sk https://127.0.0.1:4443/s2 -XPUT -T /large/file - curl --http2 -sk https://127.0.0.1:4443/s2 -XPUT -T - < /dev/urandom - - Thanks to Robert Samuel Newson for raising this issue with details. - This fix must be backported to 1.8. - - (cherry picked from commit eba10f24b7da27cde60d2db24aeb1147e1657579) - Signed-off-by: Christopher Faulet - -diff --git a/src/h2.c b/src/h2.c -index 7d9ddd50..5c83d6b6 100644 ---- a/src/h2.c -+++ b/src/h2.c -@@ -262,6 +262,14 @@ int h2_make_h1_request(struct http_hdr *list, char *out, int osize, unsigned int - *(out++) = '\n'; - } - -+ if ((*msgf & (H2_MSGF_BODY|H2_MSGF_BODY_TUNNEL|H2_MSGF_BODY_CL)) == H2_MSGF_BODY) { -+ /* add chunked encoding */ -+ if (out + 28 > out_end) -+ goto fail; -+ memcpy(out, "transfer-encoding: chunked\r\n", 28); -+ out += 28; -+ } -+ - /* now we may have to build a cookie list. We'll dump the values of all - * visited headers. 
- */ -diff --git a/src/mux_h2.c b/src/mux_h2.c -index 82dd414a..5f1da0df 100644 ---- a/src/mux_h2.c -+++ b/src/mux_h2.c -@@ -2785,6 +2785,7 @@ static int h2_frt_transfer_data(struct h2s *h2s, struct buffer *buf, int count) - struct h2c *h2c = h2s->h2c; - int block1, block2; - unsigned int flen = h2c->dfl; -+ unsigned int chklen = 0; - - h2s->cs->flags &= ~CS_FL_RCV_MORE; - h2c->flags &= ~H2_CF_DEM_SFULL; -@@ -2820,14 +2821,35 @@ static int h2_frt_transfer_data(struct h2s *h2s, struct buffer *buf, int count) - return 0; - } - -+ /* chunked-encoding requires more room */ -+ if (h2s->flags & H2_SF_DATA_CHNK) { -+ chklen = MIN(flen, count); -+ chklen = (chklen < 16) ? 1 : (chklen < 256) ? 2 : -+ (chklen < 4096) ? 3 : (chklen < 65536) ? 4 : -+ (chklen < 1048576) ? 4 : 8; -+ chklen += 4; // CRLF, CRLF -+ } -+ - /* does it fit in output buffer or should we wait ? */ -- if (flen > count) { -- flen = count; -- if (!flen) { -- h2c->flags |= H2_CF_DEM_SFULL; -- h2s->cs->flags |= CS_FL_RCV_MORE; -- return 0; -- } -+ if (flen + chklen > count) { -+ if (chklen >= count) -+ goto full; -+ flen = count - chklen; -+ } -+ -+ if (h2s->flags & H2_SF_DATA_CHNK) { -+ /* emit the chunk size */ -+ unsigned int chksz = flen; -+ char str[10]; -+ char *beg; -+ -+ beg = str + sizeof(str); -+ *--beg = '\n'; -+ *--beg = '\r'; -+ do { -+ *--beg = hextab[chksz & 0xF]; -+ } while (chksz >>= 4); -+ bi_putblk(buf, beg, str + sizeof(str) - beg); - } - - /* Block1 is the length of the first block before the buffer wraps, -@@ -2844,6 +2866,11 @@ static int h2_frt_transfer_data(struct h2s *h2s, struct buffer *buf, int count) - if (block2) - bi_putblk(buf, b_ptr(h2c->dbuf, block1), block2); - -+ if (h2s->flags & H2_SF_DATA_CHNK) { -+ /* emit the CRLF */ -+ bi_putblk(buf, "\r\n", 2); -+ } -+ - /* now mark the input data as consumed (will be deleted from the buffer - * by the caller when seeing FRAME_A after sending the window update). - */ -@@ -2854,15 +2881,22 @@ static int h2_frt_transfer_data(struct h2s *h2s, struct buffer *buf, int count) - - if (h2c->dfl > h2c->dpl) { - /* more data available, transfer stalled on stream full */ -- h2c->flags |= H2_CF_DEM_SFULL; -- h2s->cs->flags |= CS_FL_RCV_MORE; -- return flen; -+ goto more; - } - - end_transfer: - /* here we're done with the frame, all the payload (except padding) was - * transferred. 
- */ -+ -+ if (h2c->dff & H2_F_DATA_END_STREAM && h2s->flags & H2_SF_DATA_CHNK) { -+ /* emit the trailing 0 CRLF CRLF */ -+ if (count < 5) -+ goto more; -+ chklen += 5; -+ bi_putblk(buf, "0\r\n\r\n", 5); -+ } -+ - h2c->rcvd_c += h2c->dpl; - h2c->rcvd_s += h2c->dpl; - h2c->dpl = 0; -@@ -2877,7 +2911,13 @@ static int h2_frt_transfer_data(struct h2s *h2s, struct buffer *buf, int count) - h2s->flags |= H2_SF_ES_RCVD; - } - -- return flen; -+ return flen + chklen; -+ full: -+ flen = chklen = 0; -+ more: -+ h2c->flags |= H2_CF_DEM_SFULL; -+ h2s->cs->flags |= CS_FL_RCV_MORE; -+ return flen + chklen; - } - - /* diff --git a/net/haproxy/patches/0006-BUG-MINOR-lua-threads-Make-luas-tasks-sticky-to-the-current-thread.patch b/net/haproxy/patches/0006-BUG-MINOR-lua-threads-Make-luas-tasks-sticky-to-the-current-thread.patch deleted file mode 100644 index f6f9d55e5..000000000 --- a/net/haproxy/patches/0006-BUG-MINOR-lua-threads-Make-luas-tasks-sticky-to-the-current-thread.patch +++ /dev/null @@ -1,36 +0,0 @@ -commit 8b8d55be7e94ee3d758d41a21fa86a036e91a264 -Author: Christopher Faulet -Date: Wed Apr 25 10:34:45 2018 +0200 - - BUG/MINOR: lua/threads: Make lua's tasks sticky to the current thread - - PiBa-NL reported a bug with tasks registered in lua when HAProxy is started with - serveral threads. These tasks have not specific affinity with threads so they - can be woken up on any threads. So, it is impossbile for these tasks to handled - cosockets or applets, because cosockets and applets are sticky on the thread - which created them. It is forbbiden to manipulate a cosocket from another - thread. - - So to fix the bug, tasks registered in lua are now sticky to the current - thread. Because these tasks can be registered before threads creation, the - affinity is set the first time a lua's task is processed. - - This patch must be backported in HAProxy 1.8. - - (cherry picked from commit 5bc9972ed836517924eea91954d255d317a53418) - Signed-off-by: Christopher Faulet - -diff --git a/src/hlua.c b/src/hlua.c -index d4b7ce91..bd0b87e3 100644 ---- a/src/hlua.c -+++ b/src/hlua.c -@@ -5513,6 +5513,9 @@ static struct task *hlua_process_task(struct task *task) - struct hlua *hlua = task->context; - enum hlua_exec status; - -+ if (task->thread_mask == MAX_THREADS_MASK) -+ task_set_affinity(task, tid_bit); -+ - /* If it is the first call to the task, we must initialize the - * execution timeouts. - */ diff --git a/net/haproxy/patches/0007-BUG-MINOR-config-disable-http-reuse-on-TCP-proxies.patch b/net/haproxy/patches/0007-BUG-MINOR-config-disable-http-reuse-on-TCP-proxies.patch deleted file mode 100644 index fdca4ea07..000000000 --- a/net/haproxy/patches/0007-BUG-MINOR-config-disable-http-reuse-on-TCP-proxies.patch +++ /dev/null @@ -1,31 +0,0 @@ -commit 80e179128cfd78d95cdebf7195fd21299e7931b6 -Author: Willy Tarreau -Date: Sat Apr 28 07:18:15 2018 +0200 - - BUG/MINOR: config: disable http-reuse on TCP proxies - - Louis Chanouha reported an inappropriate warning when http-reuse is - present in a defaults section while a TCP proxy accidently inherits - it and finds a conflict with other options like the use of the PROXY - protocol. To fix this patch removes the http-reuse option for TCP - proxies. - - This fix needs to be backported to 1.8, 1.7 and possibly 1.6. 
- - (cherry picked from commit 46deab6e64bfda7211b7c3199ad01f136141c86f) - Signed-off-by: Christopher Faulet - -diff --git a/src/cfgparse.c b/src/cfgparse.c -index 5a460381..63d2de58 100644 ---- a/src/cfgparse.c -+++ b/src/cfgparse.c -@@ -8702,6 +8702,9 @@ out_uri_auth_compat: - } - #endif - -+ if ((curproxy->mode != PR_MODE_HTTP) && (curproxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) -+ curproxy->options &= ~PR_O_REUSE_MASK; -+ - if ((curproxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) { - if ((curproxy->conn_src.opts & CO_SRC_TPROXY_MASK) == CO_SRC_TPROXY_CLI || - (curproxy->conn_src.opts & CO_SRC_TPROXY_MASK) == CO_SRC_TPROXY_CIP || diff --git a/net/haproxy/patches/0008-BUG-MINOR-checks-Fix-check--health-computation-for-flapping-servers.patch b/net/haproxy/patches/0008-BUG-MINOR-checks-Fix-check--health-computation-for-flapping-servers.patch deleted file mode 100644 index d2dd8d899..000000000 --- a/net/haproxy/patches/0008-BUG-MINOR-checks-Fix-check--health-computation-for-flapping-servers.patch +++ /dev/null @@ -1,42 +0,0 @@ -commit edb5a1efd22eb9918574d962640cd2ae3bb45ad3 -Author: Christopher Faulet -Date: Wed May 2 12:12:45 2018 +0200 - - BUG/MINOR: checks: Fix check->health computation for flapping servers - - This patch fixes an old bug introduced in the commit 7b1d47ce ("MAJOR: checks: - move health checks changes to set_server_check_status()"). When a DOWN server is - flapping, everytime a check succeds, check->health is incremented. But when a - check fails, it is decremented only when it is higher than the rise value. So if - only one check succeds for a DOWN server, check->health will remain set to 1 for - all subsequent failing checks. - - So, at first glance, it seems not that terrible because the server remains - DOWN. But it is reported in the transitional state "DOWN server, going up". And - it will remain in this state until it is UP again. And there is also an - insidious side effect. If a DOWN server is flapping time to time, It will end to - be considered UP after a uniq successful check, , regardless the rise threshold, - because check->health will be increased slowly and never decreased. - - To fix the bug, we just need to reset check->health to 0 when a check fails for - a DOWN server. To do so, we just need to relax the condition to handle a failure - in the function set_server_check_status. - - This patch must be backported to haproxy 1.5 and newer. 
- - (cherry picked from commit b119a79fc336f2b6074de1c3113b1682c717985c) - Signed-off-by: Willy Tarreau - -diff --git a/src/checks.c b/src/checks.c -index 80a9c70d..d07a82f8 100644 ---- a/src/checks.c -+++ b/src/checks.c -@@ -243,7 +243,7 @@ static void set_server_check_status(struct check *check, short status, const cha - */ - if ((!(check->state & CHK_ST_AGENT) || - (check->status >= HCHK_STATUS_L57DATA)) && -- (check->health >= check->rise)) { -+ (check->health > 0)) { - HA_ATOMIC_ADD(&s->counters.failed_checks, 1); - report = 1; - check->health--; diff --git a/net/haproxy/patches/0009-BUG-MEDIUM-threads-Fix-the-sync-point-for-more-than-32-threads.patch b/net/haproxy/patches/0009-BUG-MEDIUM-threads-Fix-the-sync-point-for-more-than-32-threads.patch deleted file mode 100644 index 7574b6907..000000000 --- a/net/haproxy/patches/0009-BUG-MEDIUM-threads-Fix-the-sync-point-for-more-than-32-threads.patch +++ /dev/null @@ -1,48 +0,0 @@ -commit 830324444e57c042666b17ac4584352cca85dafd -Author: Christopher Faulet -Date: Wed May 2 16:58:40 2018 +0200 - - BUG/MEDIUM: threads: Fix the sync point for more than 32 threads - - In the sync point, to know if a thread has requested a synchronization, we call - the function thread_need_sync(). It should return 1 if yes, otherwise it should - return 0. It is intended to return a signed integer. - - But internally, instead of returning 0 or 1, it returns 0 or tid_bit - (threads_want_sync & tid_bit). So, tid_bit is casted in integer. For the first - 32 threads, it's ok, because we always check if thread_need_sync() returns - something else than 0. But this is a problem if HAProxy is started with more - than 32 threads, because for threads 33 to 64 (so for tid 32 to 63), their - tid_bit casted to integer are evaluated to 0. So the sync point does not work for - more than 32 threads. - - Now, the function thread_need_sync() respects its contract, returning 0 or - 1. the function thread_no_sync() has also been updated to avoid any ambiguities. - - This patch must be backported in HAProxy 1.8. - - (cherry picked from commit 148b16e1ceb819dfcef4c45828121d9cd7474b35) - Signed-off-by: Willy Tarreau - -diff --git a/src/hathreads.c b/src/hathreads.c -index daf226ce..944a0d5b 100644 ---- a/src/hathreads.c -+++ b/src/hathreads.c -@@ -85,7 +85,7 @@ void thread_want_sync() - /* Returns 1 if no thread has requested a sync. Otherwise, it returns 0. */ - int thread_no_sync() - { -- return (threads_want_sync == 0); -+ return (threads_want_sync == 0UL); - } - - /* Returns 1 if the current thread has requested a sync. Otherwise, it returns -@@ -93,7 +93,7 @@ int thread_no_sync() - */ - int thread_need_sync() - { -- return (threads_want_sync & tid_bit); -+ return ((threads_want_sync & tid_bit) != 0UL); - } - - /* Thread barrier. Synchronizes all threads at the barrier referenced by diff --git a/net/haproxy/patches/0010-BUG-MINOR-lua-Put-tasks-to-sleep-when-waiting-for-data.patch b/net/haproxy/patches/0010-BUG-MINOR-lua-Put-tasks-to-sleep-when-waiting-for-data.patch deleted file mode 100644 index 3b298f963..000000000 --- a/net/haproxy/patches/0010-BUG-MINOR-lua-Put-tasks-to-sleep-when-waiting-for-data.patch +++ /dev/null @@ -1,31 +0,0 @@ -commit 335bc7b74eee84f0a3bcb615cadd23fe01d1336c -Author: PiBa-NL -Date: Wed May 2 22:27:14 2018 +0200 - - BUG/MINOR: lua: Put tasks to sleep when waiting for data - - If a lua socket is waiting for data it currently spins at 100% cpu usage. 
- This because the TICK_ETERNITY returned by the socket is ignored when - setting the 'expire' time of the task. - - Fixed by removing the check for yields that return TICK_ETERNITY. - - This should be backported to at least 1.8. - - (cherry picked from commit fe971b35aeca9994f3823112c783aa796e74075a) - Signed-off-by: Willy Tarreau - -diff --git a/src/hlua.c b/src/hlua.c -index bd0b87e3..0100e7cf 100644 ---- a/src/hlua.c -+++ b/src/hlua.c -@@ -5536,8 +5536,7 @@ static struct task *hlua_process_task(struct task *task) - - case HLUA_E_AGAIN: /* co process or timeout wake me later. */ - notification_gc(&hlua->com); -- if (hlua->wake_time != TICK_ETERNITY) -- task->expire = hlua->wake_time; -+ task->expire = hlua->wake_time; - break; - - /* finished with error. */ diff --git a/net/haproxy/patches/0011-DOC-MINOR-clean-up-LUA-documentation-re-servers-array-table.patch b/net/haproxy/patches/0011-DOC-MINOR-clean-up-LUA-documentation-re-servers-array-table.patch deleted file mode 100644 index 7f210e533..000000000 --- a/net/haproxy/patches/0011-DOC-MINOR-clean-up-LUA-documentation-re-servers-array-table.patch +++ /dev/null @@ -1,252 +0,0 @@ -commit 016feef5483397491af3242162934d9e9dbc6263 -Author: Patrick Hemmer -Date: Tue May 1 21:30:41 2018 -0400 - - DOC/MINOR: clean up LUA documentation re: servers & array/table. - - * A few typos - * Fix definitions of values which are tables, not arrays. - * Consistent US English naming for "server" instead of "serveur". - - [tfo: should be backported to 1.6 and higher] - - (cherry picked from commit c6a1d711a4d47d68611aa28adecdadba96221bde) - Signed-off-by: Willy Tarreau - -diff --git a/doc/lua-api/index.rst b/doc/lua-api/index.rst -index e7aa425d..2d210945 100644 ---- a/doc/lua-api/index.rst -+++ b/doc/lua-api/index.rst -@@ -169,9 +169,9 @@ Core class - - **context**: task, action, sample-fetch, converter - -- This attribute is an array of declared proxies (frontend and backends). Each -- proxy give an access to his list of listeners and servers. Each entry is of -- type :ref:`proxy_class` -+ This attribute is a table of declared proxies (frontend and backends). Each -+ proxy give an access to his list of listeners and servers. The table is -+ indexed by proxy name, and each entry is of type :ref:`proxy_class`. - - Warning, if you are declared frontend and backend with the same name, only one - of these are listed. -@@ -183,12 +183,9 @@ Core class - - **context**: task, action, sample-fetch, converter - -- This attribute is an array of declared proxies with backend capability. Each -- proxy give an access to his list of listeners and servers. Each entry is of -- type :ref:`proxy_class` -- -- Warning, if you are declared frontend and backend with the same name, only one -- of these are listed. -+ This attribute is a table of declared proxies with backend capability. Each -+ proxy give an access to his list of listeners and servers. The table is -+ indexed by the backend name, and each entry is of type :ref:`proxy_class`. - - :see: :js:attr:`core.proxies` - :see: :js:attr:`core.frontends` -@@ -197,12 +194,9 @@ Core class - - **context**: task, action, sample-fetch, converter - -- This attribute is an array of declared proxies with frontend capability. Each -- proxy give an access to his list of listeners and servers. Each entry is of -- type :ref:`proxy_class` -- -- Warning, if you are declared frontend and backend with the same name, only one -- of these are listed. -+ This attribute is a table of declared proxies with frontend capability. 
Each -+ proxy give an access to his list of listeners and servers. The table is -+ indexed by the frontend name, and each entry is of type :ref:`proxy_class`. - - :see: :js:attr:`core.proxies` - :see: :js:attr:`core.backends` -@@ -336,7 +330,7 @@ Core class - Lua execution or resume, so two consecutive call to the function "now" will - probably returns the same result. - -- :returns: an array which contains two entries "sec" and "usec". "sec" -+ :returns: a table which contains two entries "sec" and "usec". "sec" - contains the current at the epoch format, and "usec" contains the - current microseconds. - -@@ -439,9 +433,12 @@ Core class - - **context**: body, init, task, action, sample-fetch, converter - -- proxies is an array containing the list of all proxies declared in the -- configuration file. Each entry of the proxies array is an object of type -- :ref:`proxy_class` -+ proxies is a table containing the list of all proxies declared in the -+ configuration file. The table is indexed by the proxy name, and each entry -+ of the proxies table is an object of type :ref:`proxy_class`. -+ -+ Warning, if you have declared a frontend and backend with the same name, only -+ one of these are listed. - - .. js:function:: core.register_action(name, actions, func [, nb_args]) - -@@ -852,13 +849,14 @@ Proxy class - - .. js:attribute:: Proxy.servers - -- Contain an array with the attached servers. Each server entry is an object of -- type :ref:`server_class`. -+ Contain a table with the attached servers. The table is indexed by server -+ name, and each server entry is an object of type :ref:`server_class`. - - .. js:attribute:: Proxy.listeners - -- Contain an array with the attached listeners. Each listeners entry is an -- object of type :ref:`listener_class`. -+ Contain a table with the attached listeners. The table is indexed by listener -+ name, and each each listeners entry is an object of type -+ :ref:`listener_class`. - - .. js:function:: Proxy.pause(px) - -@@ -908,21 +906,25 @@ Proxy class - - .. js:function:: Proxy.get_stats(px) - -- Returns an array containg the proxy statistics. The statistics returned are -+ Returns a table containg the proxy statistics. The statistics returned are - not the same if the proxy is frontend or a backend. - - :param class_proxy px: A :ref:`proxy_class` which indicates the manipulated - proxy. -- :returns: a key/value array containing stats -+ :returns: a key/value table containing stats - - .. _server_class: - - Server class - ============ - -+.. js:class:: Server -+ -+ This class provides a way for manipulating servers and retrieving information. -+ - .. js:function:: Server.is_draining(sv) - -- Return true if the server is currently draining stiky connections. -+ Return true if the server is currently draining sticky connections. - - :param class_server sv: A :ref:`server_class` which indicates the manipulated - server. -@@ -930,7 +932,7 @@ Server class - - .. js:function:: Server.set_weight(sv, weight) - -- Dynamically change the weight of the serveur. See the management socket -+ Dynamically change the weight of the server. See the management socket - documentation for more information about the format of the string. - - :param class_server sv: A :ref:`server_class` which indicates the manipulated -@@ -939,7 +941,7 @@ Server class - - .. js:function:: Server.get_weight(sv) - -- This function returns an integer representing the serveur weight. -+ This function returns an integer representing the server weight. 
- - :param class_server sv: A :ref:`server_class` which indicates the manipulated - server. -@@ -947,16 +949,16 @@ Server class - - .. js:function:: Server.set_addr(sv, addr) - -- Dynamically change the address of the serveur. See the management socket -+ Dynamically change the address of the server. See the management socket - documentation for more information about the format of the string. - - :param class_server sv: A :ref:`server_class` which indicates the manipulated - server. -- :param string weight: A string describing the server address. -+ :param string addr: A string describing the server address. - - .. js:function:: Server.get_addr(sv) - -- Returns a string describing the address of the serveur. -+ Returns a string describing the address of the server. - - :param class_server sv: A :ref:`server_class` which indicates the manipulated - server. -@@ -968,7 +970,7 @@ Server class - - :param class_server sv: A :ref:`server_class` which indicates the manipulated - server. -- :returns: a key/value array containing stats -+ :returns: a key/value table containing stats - - .. js:function:: Server.shut_sess(sv) - -@@ -1085,7 +1087,7 @@ Listener class - - :param class_listener ls: A :ref:`listener_class` which indicates the - manipulated listener. -- :returns: a key/value array containing stats -+ :returns: a key/value table containing stats - - .. _concat_class: - -@@ -1169,7 +1171,7 @@ Fetches class - usage. they are the chapters 7.3.2 to 7.3.6. - - **warning** some sample fetches are not available in some context. These -- limitations are specified in this documentation when theire useful. -+ limitations are specified in this documentation when they're useful. - - :see: :js:attr:`TXN.f` - :see: :js:attr:`TXN.sf` -@@ -1345,13 +1347,13 @@ HTTP class - - .. js:function:: HTTP.req_get_headers(http) - -- Returns an array containing all the request headers. -+ Returns a table containing all the request headers. - - :param class_http http: The related http object. -- :returns: array of headers. -+ :returns: table of headers. - :see: :js:func:`HTTP.res_get_headers` - -- This is the form of the returned array: -+ This is the form of the returned table: - - .. code-block:: lua - -@@ -1366,13 +1368,13 @@ HTTP class - - .. js:function:: HTTP.res_get_headers(http) - -- Returns an array containing all the response headers. -+ Returns a table containing all the response headers. - - :param class_http http: The related http object. -- :returns: array of headers. -+ :returns: table of headers. - :see: :js:func:`HTTP.req_get_headers` - -- This is the form of the returned array: -+ This is the form of the returned table: - - .. code-block:: lua - -@@ -2210,12 +2212,12 @@ AppletHTTP class - - .. js:attribute:: AppletHTTP.headers - -- :returns: array -+ :returns: table - -- The attribute headers returns an array containing the HTTP -+ The attribute headers returns a table containing the HTTP - headers. The header names are always in lower case. As the header name can be - encountered more than once in each request, the value is indexed with 0 as -- first index value. The array have this form: -+ first index value. The table have this form: - - .. 
code-block:: lua - diff --git a/net/haproxy/patches/0012-BUG-MINOR-map-correctly-track-reference-to-the-last-ref_elt-being-dumped.patch b/net/haproxy/patches/0012-BUG-MINOR-map-correctly-track-reference-to-the-last-ref_elt-being-dumped.patch deleted file mode 100644 index af58e5f44..000000000 --- a/net/haproxy/patches/0012-BUG-MINOR-map-correctly-track-reference-to-the-last-ref_elt-being-dumped.patch +++ /dev/null @@ -1,31 +0,0 @@ -commit b2219ae216a141acdf0e2a3f67d2c85aee2a2bc2 -Author: Dragan Dosen -Date: Fri May 4 16:27:15 2018 +0200 - - BUG/MINOR: map: correctly track reference to the last ref_elt being dumped - - The bug was introduced in the commit 8d85aa4 ("BUG/MAJOR: map: fix - segfault during 'show map/acl' on cli"). - - This patch should be backported to 1.8, 1.7 and 1.6. - - (cherry picked from commit 336a11f75571ad46f74a7c6247c13ed44f95da93) - Signed-off-by: Willy Tarreau - -diff --git a/src/map.c b/src/map.c -index f40e4394..a9a1e53c 100644 ---- a/src/map.c -+++ b/src/map.c -@@ -307,9 +307,9 @@ static int cli_io_handler_pat_list(struct appctx *appctx) - * reference to the last ref_elt being dumped. - */ - if (appctx->st2 == STAT_ST_LIST) { -- if (!LIST_ISEMPTY(&appctx->ctx.sess.bref.users)) { -- LIST_DEL(&appctx->ctx.sess.bref.users); -- LIST_INIT(&appctx->ctx.sess.bref.users); -+ if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users)) { -+ LIST_DEL(&appctx->ctx.map.bref.users); -+ LIST_INIT(&appctx->ctx.map.bref.users); - } - } - return 1; diff --git a/net/haproxy/patches/0013-BUG-MEDIUM-task-Dont-free-a-task-that-is-about-to-be-run.patch b/net/haproxy/patches/0013-BUG-MEDIUM-task-Dont-free-a-task-that-is-about-to-be-run.patch deleted file mode 100644 index e802c7f6b..000000000 --- a/net/haproxy/patches/0013-BUG-MEDIUM-task-Dont-free-a-task-that-is-about-to-be-run.patch +++ /dev/null @@ -1,128 +0,0 @@ -commit a0f0db361978154474d76028183647d5991f3b5c -Author: Olivier Houchard -Date: Fri May 4 15:46:16 2018 +0200 - - BUG/MEDIUM: task: Don't free a task that is about to be run. - - While running a task, we may try to delete and free a task that is about to - be run, because it's part of the local tasks list, or because rq_next points - to it. - So flag any task that is in the local tasks list to be deleted, instead of - run, by setting t->process to NULL, and re-make rq_next a global, - thread-local variable, that is modified if we attempt to delete that task. - - Many thanks to PiBa-NL for reporting this and analysing the problem. - - This should be backported to 1.8. 
- - (cherry picked from commit 9b36cb4a414c22e13d344afbbe70684e9f2f1d49) - Signed-off-by: Willy Tarreau - -diff --git a/include/proto/task.h b/include/proto/task.h -index cbc1a907..c1c4c07e 100644 ---- a/include/proto/task.h -+++ b/include/proto/task.h -@@ -90,6 +90,8 @@ extern unsigned int nb_tasks_cur; - extern unsigned int niced_tasks; /* number of niced tasks in the run queue */ - extern struct pool_head *pool_head_task; - extern struct pool_head *pool_head_notification; -+extern THREAD_LOCAL struct task *curr_task; /* task currently running or NULL */ -+extern THREAD_LOCAL struct eb32sc_node *rq_next; /* Next task to be potentially run */ - - __decl_hathreads(extern HA_SPINLOCK_T rq_lock); /* spin lock related to run queue */ - __decl_hathreads(extern HA_SPINLOCK_T wq_lock); /* spin lock related to wait queue */ -@@ -177,8 +179,11 @@ static inline struct task *__task_unlink_rq(struct task *t) - static inline struct task *task_unlink_rq(struct task *t) - { - HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock); -- if (likely(task_in_rq(t))) -+ if (likely(task_in_rq(t))) { -+ if (&t->rq == rq_next) -+ rq_next = eb32sc_next(rq_next, tid_bit); - __task_unlink_rq(t); -+ } - HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock); - return t; - } -@@ -230,7 +235,7 @@ static inline struct task *task_new(unsigned long thread_mask) - * Free a task. Its context must have been freed since it will be lost. - * The task count is decremented. - */ --static inline void task_free(struct task *t) -+static inline void __task_free(struct task *t) - { - pool_free(pool_head_task, t); - if (unlikely(stopping)) -@@ -238,6 +243,18 @@ static inline void task_free(struct task *t) - HA_ATOMIC_SUB(&nb_tasks, 1); - } - -+static inline void task_free(struct task *t) -+{ -+ /* There's no need to protect t->state with a lock, as the task -+ * has to run on the current thread. -+ */ -+ if (t == curr_task || !(t->state & TASK_RUNNING)) -+ __task_free(t); -+ else -+ t->process = NULL; -+} -+ -+ - /* Place into the wait queue, where it may already be. If the expiration - * timer is infinite, do nothing and rely on wake_expired_task to clean up. 
- */ -diff --git a/src/task.c b/src/task.c -index fd9acf66..3d021bb4 100644 ---- a/src/task.c -+++ b/src/task.c -@@ -39,6 +39,7 @@ unsigned int nb_tasks_cur = 0; /* copy of the tasks count */ - unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */ - - THREAD_LOCAL struct task *curr_task = NULL; /* task currently running or NULL */ -+THREAD_LOCAL struct eb32sc_node *rq_next = NULL; /* Next task to be potentially run */ - - __decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) rq_lock); /* spin lock related to run queue */ - __decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) wq_lock); /* spin lock related to wait queue */ -@@ -186,7 +187,6 @@ void process_runnable_tasks() - struct task *t; - int i; - int max_processed; -- struct eb32sc_node *rq_next; - struct task *local_tasks[16]; - int local_tasks_count; - int final_tasks_count; -@@ -227,8 +227,14 @@ void process_runnable_tasks() - */ - if (likely(t->process == process_stream)) - t = process_stream(t); -- else -- t = t->process(t); -+ else { -+ if (t->process != NULL) -+ t = t->process(t); -+ else { -+ __task_free(t); -+ t = NULL; -+ } -+ } - curr_task = NULL; - - if (likely(t != NULL)) { -@@ -309,8 +315,14 @@ void process_runnable_tasks() - curr_task = t; - if (likely(t->process == process_stream)) - t = process_stream(t); -- else -- t = t->process(t); -+ else { -+ if (t->process != NULL) -+ t = t->process(t); -+ else { -+ __task_free(t); -+ t = NULL; -+ } -+ } - curr_task = NULL; - if (t) - local_tasks[final_tasks_count++] = t; diff --git a/net/haproxy/patches/0014-BUG-MINOR-lua-schedule-socket-task-upon-lua-connect.patch b/net/haproxy/patches/0014-BUG-MINOR-lua-schedule-socket-task-upon-lua-connect.patch deleted file mode 100644 index f7e3cf34d..000000000 --- a/net/haproxy/patches/0014-BUG-MINOR-lua-schedule-socket-task-upon-lua-connect.patch +++ /dev/null @@ -1,48 +0,0 @@ -commit 52ec3578c3ddc688ae14da3cd3e7e351494603d8 -Author: PiBa-NL -Date: Sat May 5 23:51:42 2018 +0200 - - BUG/MINOR: lua: schedule socket task upon lua connect() - - The parameters like server-address, port and timeout should be set before - process_stream task is called to avoid the stream being 'closed' before it - got initialized properly. This is most clearly visible when running with - tune.lua.forced-yield=1.. So scheduling the task should not be done when - creating the lua socket, but when connect is called. The error - "socket: not yet initialised, you can't set timeouts." would then appear. - - Below code for example also shows this issue, as the sleep will - yield the lua code: - local con = core.tcp() - core.sleep(1) - con:settimeout(10) - - (cherry picked from commit 706d5ee0c366787536213ccd6dea264d20b76a22) - [wt: must be backported to 1.7 and 1.6 as well with a different patch, - see https://www.mail-archive.com/haproxy@formilux.org/msg29924.html] - Signed-off-by: Willy Tarreau - -diff --git a/src/hlua.c b/src/hlua.c -index 0100e7cf..5cc918c9 100644 ---- a/src/hlua.c -+++ b/src/hlua.c -@@ -2415,6 +2415,10 @@ __LJMP static int hlua_socket_connect(struct lua_State *L) - WILL_LJMP(luaL_error(L, "out of memory")); - } - xref_unlock(&socket->xref, peer); -+ -+ task_wakeup(s->task, TASK_WOKEN_INIT); -+ /* Return yield waiting for connection. 
*/ -+ - WILL_LJMP(hlua_yieldk(L, 0, 0, hlua_socket_connect_yield, TICK_ETERNITY, 0)); - - return 0; -@@ -2566,8 +2570,6 @@ __LJMP static int hlua_socket_new(lua_State *L) - strm->flags |= SF_DIRECT | SF_ASSIGNED | SF_ADDR_SET | SF_BE_ASSIGNED; - strm->target = &socket_tcp.obj_type; - -- task_wakeup(strm->task, TASK_WOKEN_INIT); -- /* Return yield waiting for connection. */ - return 1; - - out_fail_stream: diff --git a/net/haproxy/patches/0015-BUG-MINOR-lua-ensure-large-proxy-IDs-can-be-represented.patch b/net/haproxy/patches/0015-BUG-MINOR-lua-ensure-large-proxy-IDs-can-be-represented.patch deleted file mode 100644 index dce13e4fa..000000000 --- a/net/haproxy/patches/0015-BUG-MINOR-lua-ensure-large-proxy-IDs-can-be-represented.patch +++ /dev/null @@ -1,38 +0,0 @@ -commit edb4427ab7c070a16cb9a23460f68b3fc3c041bb -Author: Willy Tarreau -Date: Sun May 6 14:50:09 2018 +0200 - - BUG/MINOR: lua: ensure large proxy IDs can be represented - - In function hlua_fcn_new_proxy() too small a buffer was passed to - snprintf(), resulting in large proxy or listener IDs to make - snprintf() fail. It is unlikely to meet this case but let's fix it - anyway. - - This fix must be backported to all stable branches where it applies. - - (cherry picked from commit 29d698040d6bb56b29c036aeba05f0d52d8ce94b) - Signed-off-by: Willy Tarreau - -diff --git a/src/hlua_fcn.c b/src/hlua_fcn.c -index a8d53d45..1df08f85 100644 ---- a/src/hlua_fcn.c -+++ b/src/hlua_fcn.c -@@ -796,7 +796,7 @@ int hlua_fcn_new_proxy(lua_State *L, struct proxy *px) - struct server *srv; - struct listener *lst; - int lid; -- char buffer[10]; -+ char buffer[17]; - - lua_newtable(L); - -@@ -836,7 +836,7 @@ int hlua_fcn_new_proxy(lua_State *L, struct proxy *px) - if (lst->name) - lua_pushstring(L, lst->name); - else { -- snprintf(buffer, 10, "sock-%d", lid); -+ snprintf(buffer, sizeof(buffer), "sock-%d", lid); - lid++; - lua_pushstring(L, buffer); - } diff --git a/net/haproxy/patches/0016-BUG-MEDIUM-http-dont-always-abort-transfers-on-CF_SHUTR.patch b/net/haproxy/patches/0016-BUG-MEDIUM-http-dont-always-abort-transfers-on-CF_SHUTR.patch deleted file mode 100644 index 0605b205d..000000000 --- a/net/haproxy/patches/0016-BUG-MEDIUM-http-dont-always-abort-transfers-on-CF_SHUTR.patch +++ /dev/null @@ -1,70 +0,0 @@ -commit 1c10e5b1b95142bb3ac385be1e60d8b180b2e99e -Author: Willy Tarreau -Date: Wed May 16 11:35:05 2018 +0200 - - BUG/MEDIUM: http: don't always abort transfers on CF_SHUTR - - Pawel Karoluk reported on Discourse[1] that HTTP/2 breaks url_param. - - Christopher managed to track it down to the HTTP_MSGF_WAIT_CONN flag - which is set there to ensure the connection is validated before sending - the headers, as we may need to rewind the stream and hash again upon - redispatch. What happens is that in the forwarding code we refrain - from forwarding when this flag is set and the connection is not yet - established, and for this we go through the missing_data_or_waiting - path. This exit path was initially designed only to wait for data - from the client, so it rightfully checks whether or not the client - has already closed since in that case it must not wait for more data. - But it also has the side effect of aborting such a transfer if the - client has closed after the request, which is exactly what happens - in H2. - - A study on the code reveals that this whole combined check should - be revisited : while it used to be true that waiting had the same - error conditions as missing data, it's not true anymore. 
Some other - corner cases were identified, such as the risk to report a server - close instead of a client timeout when waiting for the client to - read the last chunk of data if the shutr is already present, or - the risk to fail a redispatch when a client uploads some data and - closes before the connection establishes. The compression seems to - be at risk of rare issues there if a write to a full buffer is not - yet possible but a shutr is already queued. - - At the moment these risks are extremely unlikely but they do exist, - and their impact is very minor since it mostly concerns an issue not - being optimally handled, and the fixes risk to cause more serious - issues. Thus this patch only focuses on how the HTTP_MSGF_WAIT_CONN - is handled and leaves the rest untouched. - - This patch needs to be backported to 1.8, and could be backported to - earlier versions to properly take care of HTTP/1 requests passing via - url_param which are closed immediately after the headers, though this - is unlikely as this behaviour is only exhibited by scripts. - - [1] https://discourse.haproxy.org/t/haproxy-1-8-x-url-param-issue-in-http2/2482/13 - - (cherry picked from commit ba20dfc50161ba705a746d54ebc1a0a45c46beab) - Signed-off-by: Willy Tarreau - -diff --git a/src/proto_http.c b/src/proto_http.c -index 4c18a27c..b384cef1 100644 ---- a/src/proto_http.c -+++ b/src/proto_http.c -@@ -4865,7 +4865,8 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit) - if (!(s->res.flags & CF_READ_ATTACHED)) { - channel_auto_connect(req); - req->flags |= CF_WAKE_CONNECT; -- goto missing_data_or_waiting; -+ channel_dont_close(req); /* don't fail on early shutr */ -+ goto waiting; - } - msg->flags &= ~HTTP_MSGF_WAIT_CONN; - } -@@ -4949,6 +4950,7 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit) - goto return_bad_req_stats_ok; - } - -+ waiting: - /* waiting for the last bits to leave the buffer */ - if (req->flags & CF_SHUTW) - goto aborted_xfer; diff --git a/net/haproxy/patches/0017-BUG-MEDIUM-pollers-Use-a-global-list-for-fd-shared-between-threads.patch b/net/haproxy/patches/0017-BUG-MEDIUM-pollers-Use-a-global-list-for-fd-shared-between-threads.patch deleted file mode 100644 index b3ae30e51..000000000 --- a/net/haproxy/patches/0017-BUG-MEDIUM-pollers-Use-a-global-list-for-fd-shared-between-threads.patch +++ /dev/null @@ -1,709 +0,0 @@ -commit 954db1d01a3d706d4cacd288f28e8517a635d36e -Author: Olivier Houchard -Date: Thu May 17 18:34:02 2018 +0200 - - BUG/MEDIUM: pollers: Use a global list for fd shared between threads. - - With the old model, any fd shared by multiple threads, such as listeners - or dns sockets, would only be updated on one threads, so that could lead - to missed event, or spurious wakeups. - To avoid this, add a global list for fd that are shared, and only remove - entries from this list when every thread as updated its poller. - This subtly changes the semantics of updt_fd_polling(), as it now unlocks - the FD_LOCK on exit. - - This is similar in spirit to commit 6b96f7289c2f401deef4bdc6e20792360807dde4 - (with the bugfix from c55b88ece616afe0b28dc81eb39bad37b5f9c33f) applied, - but had to be rewrote, because of the differences between 1.8 and master. - - This should only be applied to 1.8. 
- -diff --git a/include/common/hathreads.h b/include/common/hathreads.h -index 325a869a..86db4d5c 100644 ---- a/include/common/hathreads.h -+++ b/include/common/hathreads.h -@@ -201,6 +201,8 @@ void thread_exit_sync(void); - int thread_no_sync(void); - int thread_need_sync(void); - -+extern unsigned long all_threads_mask; -+ - #if defined(DEBUG_THREAD) || defined(DEBUG_FULL) - - /* WARNING!!! if you update this enum, please also keep lock_label() up to date below */ -@@ -209,6 +211,7 @@ enum lock_label { - FDTAB_LOCK, - FDCACHE_LOCK, - FD_LOCK, -+ FD_UPDATE_LOCK, - POLL_LOCK, - TASK_RQ_LOCK, - TASK_WQ_LOCK, -@@ -330,6 +333,7 @@ static inline const char *lock_label(enum lock_label label) - case FDCACHE_LOCK: return "FDCACHE"; - case FD_LOCK: return "FD"; - case FDTAB_LOCK: return "FDTAB"; -+ case FD_UPDATE_LOCK: return "FD_UPDATE"; - case POLL_LOCK: return "POLL"; - case TASK_RQ_LOCK: return "TASK_RQ"; - case TASK_WQ_LOCK: return "TASK_WQ"; -diff --git a/include/proto/fd.h b/include/proto/fd.h -index bb91bb2c..b6199ccf 100644 ---- a/include/proto/fd.h -+++ b/include/proto/fd.h -@@ -43,6 +43,9 @@ extern THREAD_LOCAL int fd_nbupdt; // number of updates in the list - __decl_hathreads(extern HA_SPINLOCK_T __attribute__((aligned(64))) fdtab_lock); /* global lock to protect fdtab array */ - __decl_hathreads(extern HA_RWLOCK_T __attribute__((aligned(64))) fdcache_lock); /* global lock to protect fd_cache array */ - __decl_hathreads(extern HA_SPINLOCK_T __attribute__((aligned(64))) poll_lock); /* global lock to protect poll info */ -+__decl_hathreads(extern HA_SPINLOCK_T __attribute__((aligned(64))) fd_updt_lock); /* global lock to protect the update list */ -+ -+extern struct fdlist update_list; // Global update list - - /* Deletes an FD from the fdsets, and recomputes the maxfd limit. - * The file descriptor is also closed. -@@ -96,14 +99,70 @@ void fd_process_cached_events(); - - /* Mark fd as updated for polling and allocate an entry in the update list - * for this if it was not already there. This can be done at any time. -+ * This function expects the FD lock to be locked, and returns with the -+ * FD lock unlocked. - */ - static inline void updt_fd_polling(const int fd) - { -- if (fdtab[fd].update_mask & tid_bit) -+ if ((fdtab[fd].update_mask & fdtab[fd].thread_mask) == -+ fdtab[fd].thread_mask) { -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); - /* already scheduled for update */ - return; -- fdtab[fd].update_mask |= tid_bit; -- fd_updt[fd_nbupdt++] = fd; -+ } -+ if (fdtab[fd].thread_mask == tid_bit) { -+ fdtab[fd].update_mask |= tid_bit; -+ fd_updt[fd_nbupdt++] = fd; -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ } else { -+ /* This is ugly, but we can afford to unlock the FD lock -+ * before we acquire the fd_updt_lock, to prevent a -+ * lock order reversal, because this function is only called -+ * from fd_update_cache(), and all users of fd_update_cache() -+ * used to just unlock the fd lock just after, anyway. -+ */ -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ HA_SPIN_LOCK(FD_UPDATE_LOCK, &fd_updt_lock); -+ /* If update_mask is non-nul, then it's already in the list -+ * so we don't have to add it. 
-+ */ -+ if (fdtab[fd].update_mask == 0) { -+ if (update_list.first == -1) { -+ update_list.first = update_list.last = fd; -+ fdtab[fd].update.next = fdtab[fd].update.prev = -1; -+ } else { -+ fdtab[update_list.last].update.next = fd; -+ fdtab[fd].update.prev = update_list.last; -+ fdtab[fd].update.next = -1; -+ update_list.last = fd; -+ } -+ } -+ fdtab[fd].update_mask |= fdtab[fd].thread_mask; -+ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock); -+ -+ } -+} -+ -+/* Called from the poller to acknoledge we read an entry from the global -+ * update list, to remove our bit from the update_mask, and remove it from -+ * the list if we were the last one. -+ */ -+/* Expects to be called with the FD lock and the FD update lock held */ -+static inline void done_update_polling(int fd) -+{ -+ fdtab[fd].update_mask &= ~tid_bit; -+ if ((fdtab[fd].update_mask & all_threads_mask) == 0) { -+ if (fdtab[fd].update.prev != -1) -+ fdtab[fdtab[fd].update.prev].update.next = -+ fdtab[fd].update.next; -+ else -+ update_list.first = fdtab[fd].update.next; -+ if (fdtab[fd].update.next != -1) -+ fdtab[fdtab[fd].update.next].update.prev = -+ fdtab[fd].update.prev; -+ else -+ update_list.last = fdtab[fd].update.prev; -+ } - } - - -@@ -175,13 +234,6 @@ static inline int fd_compute_new_polled_status(int state) - */ - static inline void fd_update_cache(int fd) - { -- /* 3 states for each direction require a polling update */ -- if ((fdtab[fd].state & (FD_EV_POLLED_R | FD_EV_ACTIVE_R)) == FD_EV_POLLED_R || -- (fdtab[fd].state & (FD_EV_POLLED_R | FD_EV_READY_R | FD_EV_ACTIVE_R)) == FD_EV_ACTIVE_R || -- (fdtab[fd].state & (FD_EV_POLLED_W | FD_EV_ACTIVE_W)) == FD_EV_POLLED_W || -- (fdtab[fd].state & (FD_EV_POLLED_W | FD_EV_READY_W | FD_EV_ACTIVE_W)) == FD_EV_ACTIVE_W) -- updt_fd_polling(fd); -- - /* only READY and ACTIVE states (the two with both flags set) require a cache entry */ - if (((fdtab[fd].state & (FD_EV_READY_R | FD_EV_ACTIVE_R)) == (FD_EV_READY_R | FD_EV_ACTIVE_R)) || - ((fdtab[fd].state & (FD_EV_READY_W | FD_EV_ACTIVE_W)) == (FD_EV_READY_W | FD_EV_ACTIVE_W))) { -@@ -190,6 +242,14 @@ static inline void fd_update_cache(int fd) - else { - fd_release_cache_entry(fd); - } -+ /* 3 states for each direction require a polling update */ -+ if ((fdtab[fd].state & (FD_EV_POLLED_R | FD_EV_ACTIVE_R)) == FD_EV_POLLED_R || -+ (fdtab[fd].state & (FD_EV_POLLED_R | FD_EV_READY_R | FD_EV_ACTIVE_R)) == FD_EV_ACTIVE_R || -+ (fdtab[fd].state & (FD_EV_POLLED_W | FD_EV_ACTIVE_W)) == FD_EV_POLLED_W || -+ (fdtab[fd].state & (FD_EV_POLLED_W | FD_EV_READY_W | FD_EV_ACTIVE_W)) == FD_EV_ACTIVE_W) -+ updt_fd_polling(fd); -+ else -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); - } - - /* -@@ -271,8 +331,9 @@ static inline void fd_stop_recv(int fd) - if (fd_recv_active(fd)) { - fdtab[fd].state &= ~FD_EV_ACTIVE_R; - fd_update_cache(fd); /* need an update entry to change the state */ -- } -- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ /* the FD lock is unlocked by fd_update_cache() */ -+ } else -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); - } - - /* Disable processing send events on fd */ -@@ -282,8 +343,9 @@ static inline void fd_stop_send(int fd) - if (fd_send_active(fd)) { - fdtab[fd].state &= ~FD_EV_ACTIVE_W; - fd_update_cache(fd); /* need an update entry to change the state */ -- } -- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ /* the FD lock is unlocked by fd_update_cache() */ -+ } else -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); - } - - /* Disable processing of events on fd for both directions. 
*/ -@@ -293,8 +355,9 @@ static inline void fd_stop_both(int fd) - if (fd_active(fd)) { - fdtab[fd].state &= ~FD_EV_ACTIVE_RW; - fd_update_cache(fd); /* need an update entry to change the state */ -- } -- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ /* the FD lock is unlocked by fd_update_cache() */ -+ } else -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); - } - - /* Report that FD cannot receive anymore without polling (EAGAIN detected). */ -@@ -304,8 +367,9 @@ static inline void fd_cant_recv(const int fd) - if (fd_recv_ready(fd)) { - fdtab[fd].state &= ~FD_EV_READY_R; - fd_update_cache(fd); /* need an update entry to change the state */ -- } -- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ /* the FD lock is unlocked by fd_update_cache() */ -+ } else -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); - } - - /* Report that FD can receive anymore without polling. */ -@@ -315,8 +379,9 @@ static inline void fd_may_recv(const int fd) - if (!fd_recv_ready(fd)) { - fdtab[fd].state |= FD_EV_READY_R; - fd_update_cache(fd); /* need an update entry to change the state */ -- } -- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ /* the FD lock is unlocked by fd_update_cache() */ -+ } else -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); - } - - /* Disable readiness when polled. This is useful to interrupt reading when it -@@ -330,8 +395,9 @@ static inline void fd_done_recv(const int fd) - if (fd_recv_polled(fd) && fd_recv_ready(fd)) { - fdtab[fd].state &= ~FD_EV_READY_R; - fd_update_cache(fd); /* need an update entry to change the state */ -- } -- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ /* the FD lock is unlocked by fd_update_cache() */ -+ } else -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); - } - - /* Report that FD cannot send anymore without polling (EAGAIN detected). */ -@@ -341,8 +407,9 @@ static inline void fd_cant_send(const int fd) - if (fd_send_ready(fd)) { - fdtab[fd].state &= ~FD_EV_READY_W; - fd_update_cache(fd); /* need an update entry to change the state */ -- } -- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ /* the FD lock is unlocked by fd_update_cache() */ -+ } else -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); - } - - /* Report that FD can send anymore without polling (EAGAIN detected). */ -@@ -352,8 +419,9 @@ static inline void fd_may_send(const int fd) - if (!fd_send_ready(fd)) { - fdtab[fd].state |= FD_EV_READY_W; - fd_update_cache(fd); /* need an update entry to change the state */ -- } -- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ /* the FD lock is unlocked by fd_update_cache() */ -+ } else -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); - } - - /* Prepare FD to try to receive */ -@@ -363,8 +431,9 @@ static inline void fd_want_recv(int fd) - if (!fd_recv_active(fd)) { - fdtab[fd].state |= FD_EV_ACTIVE_R; - fd_update_cache(fd); /* need an update entry to change the state */ -- } -- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ /* the FD lock is unlocked by fd_update_cache() */ -+ } else -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); - } - - /* Prepare FD to try to send */ -@@ -374,8 +443,9 @@ static inline void fd_want_send(int fd) - if (!fd_send_active(fd)) { - fdtab[fd].state |= FD_EV_ACTIVE_W; - fd_update_cache(fd); /* need an update entry to change the state */ -- } -- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ /* the FD lock is unlocked by fd_update_cache() */ -+ } else -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); - } - - /* Update events seen for FD and its state if needed. 
This should be called -diff --git a/include/types/fd.h b/include/types/fd.h -index 9f2c5fee..8e34c624 100644 ---- a/include/types/fd.h -+++ b/include/types/fd.h -@@ -90,11 +90,24 @@ enum fd_states { - */ - #define DEAD_FD_MAGIC 0xFDDEADFD - -+struct fdlist_entry { -+ int next; -+ int prev; -+} __attribute__ ((aligned(8))); -+ -+/* head of the fd list */ -+struct fdlist { -+ int first; -+ int last; -+} __attribute__ ((aligned(8))); -+ -+ - /* info about one given fd */ - struct fdtab { - __decl_hathreads(HA_SPINLOCK_T lock); - unsigned long thread_mask; /* mask of thread IDs authorized to process the task */ - unsigned long polled_mask; /* mask of thread IDs currently polling this fd */ -+ struct fdlist_entry update; /* Entry in the global update list */ - unsigned long update_mask; /* mask of thread IDs having an update for fd */ - void (*iocb)(int fd); /* I/O handler */ - void *owner; /* the connection or listener associated with this fd, NULL if closed */ -diff --git a/src/ev_epoll.c b/src/ev_epoll.c -index 124b8163..adc15acd 100644 ---- a/src/ev_epoll.c -+++ b/src/ev_epoll.c -@@ -59,13 +59,51 @@ REGPRM1 static void __fd_clo(int fd) - } - } - -+static void _update_fd(int fd) -+{ -+ int en, opcode; -+ -+ en = fdtab[fd].state; -+ -+ if (fdtab[fd].polled_mask & tid_bit) { -+ if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_POLLED_RW)) { -+ /* fd removed from poll list */ -+ opcode = EPOLL_CTL_DEL; -+ HA_ATOMIC_AND(&fdtab[fd].polled_mask, ~tid_bit); -+ } -+ else { -+ /* fd status changed */ -+ opcode = EPOLL_CTL_MOD; -+ } -+ } -+ else if ((fdtab[fd].thread_mask & tid_bit) && (en & FD_EV_POLLED_RW)) { -+ /* new fd in the poll list */ -+ opcode = EPOLL_CTL_ADD; -+ HA_ATOMIC_OR(&fdtab[fd].polled_mask, tid_bit); -+ } -+ else { -+ return; -+ } -+ -+ /* construct the epoll events based on new state */ -+ ev.events = 0; -+ if (en & FD_EV_POLLED_R) -+ ev.events |= EPOLLIN | EPOLLRDHUP; -+ -+ if (en & FD_EV_POLLED_W) -+ ev.events |= EPOLLOUT; -+ -+ ev.data.fd = fd; -+ epoll_ctl(epoll_fd[tid], opcode, fd, &ev); -+} -+ - /* - * Linux epoll() poller - */ - REGPRM2 static void _do_poll(struct poller *p, int exp) - { - int status, eo, en; -- int fd, opcode; -+ int fd; - int count; - int updt_idx; - int wait_time; -@@ -89,39 +127,31 @@ REGPRM2 static void _do_poll(struct poller *p, int exp) - en = fd_compute_new_polled_status(eo); - fdtab[fd].state = en; - HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -- -- if (fdtab[fd].polled_mask & tid_bit) { -- if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_POLLED_RW)) { -- /* fd removed from poll list */ -- opcode = EPOLL_CTL_DEL; -- HA_ATOMIC_AND(&fdtab[fd].polled_mask, ~tid_bit); -- } -- else { -- /* fd status changed */ -- opcode = EPOLL_CTL_MOD; -- } -- } -- else if ((fdtab[fd].thread_mask & tid_bit) && (en & FD_EV_POLLED_RW)) { -- /* new fd in the poll list */ -- opcode = EPOLL_CTL_ADD; -- HA_ATOMIC_OR(&fdtab[fd].polled_mask, tid_bit); -- } -+ _update_fd(fd); -+ } -+ fd_nbupdt = 0; -+ /* Scan the global update list */ -+ HA_SPIN_LOCK(FD_UPDATE_LOCK, &fd_updt_lock); -+ for (fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) { -+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); -+ if (fdtab[fd].update_mask & tid_bit) -+ done_update_polling(fd); - else { -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); - continue; - } -+ fdtab[fd].new = 0; - -- /* construct the epoll events based on new state */ -- ev.events = 0; -- if (en & FD_EV_POLLED_R) -- ev.events |= EPOLLIN | EPOLLRDHUP; -- -- if (en & FD_EV_POLLED_W) -- ev.events |= EPOLLOUT; -+ eo = fdtab[fd].state; 
-+ en = fd_compute_new_polled_status(eo); -+ fdtab[fd].state = en; - -- ev.data.fd = fd; -- epoll_ctl(epoll_fd[tid], opcode, fd, &ev); -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ if (!fdtab[fd].owner) -+ continue; -+ _update_fd(fd); - } -- fd_nbupdt = 0; -+ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock); - - /* compute the epoll_wait() timeout */ - if (!exp) -@@ -208,8 +238,10 @@ static int init_epoll_per_thread() - * fd for this thread. Let's just mark them as updated, the poller will - * do the rest. - */ -- for (fd = 0; fd < maxfd; fd++) -+ for (fd = 0; fd < maxfd; fd++) { -+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); - updt_fd_polling(fd); -+ } - - return 1; - fail_fd: -diff --git a/src/ev_kqueue.c b/src/ev_kqueue.c -index 8cd6dd84..642de8b3 100644 ---- a/src/ev_kqueue.c -+++ b/src/ev_kqueue.c -@@ -33,6 +33,41 @@ static int kqueue_fd[MAX_THREADS]; // per-thread kqueue_fd - static THREAD_LOCAL struct kevent *kev = NULL; - static struct kevent *kev_out = NULL; // Trash buffer for kevent() to write the eventlist in - -+static int _update_fd(int fd, int start) -+{ -+ int en; -+ int changes = start; -+ -+ en = fdtab[fd].state; -+ -+ if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_POLLED_RW)) { -+ if (!(fdtab[fd].polled_mask & tid_bit)) { -+ /* fd was not watched, it's still not */ -+ return 0; -+ } -+ /* fd totally removed from poll list */ -+ EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); -+ EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); -+ HA_ATOMIC_AND(&fdtab[fd].polled_mask, ~tid_bit); -+ } -+ else { -+ /* OK fd has to be monitored, it was either added or changed */ -+ -+ if (en & FD_EV_POLLED_R) -+ EV_SET(&kev[changes++], fd, EVFILT_READ, EV_ADD, 0, 0, NULL); -+ else if (fdtab[fd].polled_mask & tid_bit) -+ EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); -+ -+ if (en & FD_EV_POLLED_W) -+ EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL); -+ else if (fdtab[fd].polled_mask & tid_bit) -+ EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); -+ -+ HA_ATOMIC_OR(&fdtab[fd].polled_mask, tid_bit); -+ } -+ return changes; -+} -+ - /* - * kqueue() poller - */ -@@ -66,32 +101,32 @@ REGPRM2 static void _do_poll(struct poller *p, int exp) - fdtab[fd].state = en; - HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); - -- if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_POLLED_RW)) { -- if (!(fdtab[fd].polled_mask & tid_bit)) { -- /* fd was not watched, it's still not */ -- continue; -- } -- /* fd totally removed from poll list */ -- EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); -- EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); -- HA_ATOMIC_AND(&fdtab[fd].polled_mask, ~tid_bit); -- } -- else { -- /* OK fd has to be monitored, it was either added or changed */ -+ changes = _update_fd(fd, changes); -+ } - -- if (en & FD_EV_POLLED_R) -- EV_SET(&kev[changes++], fd, EVFILT_READ, EV_ADD, 0, 0, NULL); -- else if (fdtab[fd].polled_mask & tid_bit) -- EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); -+ /* Scan the global update list */ -+ HA_SPIN_LOCK(FD_UPDATE_LOCK, &fd_updt_lock); -+ for (fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) { -+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); -+ if (fdtab[fd].update_mask & tid_bit) -+ done_update_polling(fd); -+ else { -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ continue; -+ } -+ fdtab[fd].new = 0; - -- if (en & FD_EV_POLLED_W) -- EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL); -- else if 
(fdtab[fd].polled_mask & tid_bit) -- EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); -+ eo = fdtab[fd].state; -+ en = fd_compute_new_polled_status(eo); -+ fdtab[fd].state = en; - -- HA_ATOMIC_OR(&fdtab[fd].polled_mask, tid_bit); -- } -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ if (!fdtab[fd].owner) -+ continue; -+ changes = _update_fd(fd, changes); - } -+ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock); -+ - if (changes) { - #ifdef EV_RECEIPT - kev[0].flags |= EV_RECEIPT; -@@ -189,8 +224,10 @@ static int init_kqueue_per_thread() - * fd for this thread. Let's just mark them as updated, the poller will - * do the rest. - */ -- for (fd = 0; fd < maxfd; fd++) -+ for (fd = 0; fd < maxfd; fd++) { -+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); - updt_fd_polling(fd); -+ } - - return 1; - fail_fd: -diff --git a/src/ev_poll.c b/src/ev_poll.c -index b7cc0bb3..c913ced2 100644 ---- a/src/ev_poll.c -+++ b/src/ev_poll.c -@@ -104,6 +104,51 @@ REGPRM2 static void _do_poll(struct poller *p, int exp) - HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock); - } - } -+ HA_SPIN_LOCK(FD_UPDATE_LOCK, &fd_updt_lock); -+ for (fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) { -+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); -+ if (fdtab[fd].update_mask & tid_bit) { -+ /* Cheat a bit, as the state is global to all pollers -+ * we don't need every thread ot take care of the -+ * update. -+ */ -+ fdtab[fd].update_mask &= ~all_threads_mask; -+ done_update_polling(fd); -+ } else { -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ continue; -+ } -+ -+ if (!fdtab[fd].owner) { -+ activity[tid].poll_drop++; -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ continue; -+ } -+ -+ fdtab[fd].new = 0; -+ -+ eo = fdtab[fd].state; -+ en = fd_compute_new_polled_status(eo); -+ fdtab[fd].state = en; -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ -+ if ((eo ^ en) & FD_EV_POLLED_RW) { -+ /* poll status changed, update the lists */ -+ HA_SPIN_LOCK(POLL_LOCK, &poll_lock); -+ if ((eo & ~en) & FD_EV_POLLED_R) -+ hap_fd_clr(fd, fd_evts[DIR_RD]); -+ else if ((en & ~eo) & FD_EV_POLLED_R) -+ hap_fd_set(fd, fd_evts[DIR_RD]); -+ -+ if ((eo & ~en) & FD_EV_POLLED_W) -+ hap_fd_clr(fd, fd_evts[DIR_WR]); -+ else if ((en & ~eo) & FD_EV_POLLED_W) -+ hap_fd_set(fd, fd_evts[DIR_WR]); -+ HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock); -+ } -+ -+ } -+ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock); - fd_nbupdt = 0; - - nbfd = 0; -diff --git a/src/ev_select.c b/src/ev_select.c -index 5f3486ed..bde923ea 100644 ---- a/src/ev_select.c -+++ b/src/ev_select.c -@@ -70,7 +70,42 @@ REGPRM2 static void _do_poll(struct poller *p, int exp) - en = fd_compute_new_polled_status(eo); - fdtab[fd].state = en; - HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ if ((eo ^ en) & FD_EV_POLLED_RW) { -+ /* poll status changed, update the lists */ -+ HA_SPIN_LOCK(POLL_LOCK, &poll_lock); -+ if ((eo & ~en) & FD_EV_POLLED_R) -+ FD_CLR(fd, fd_evts[DIR_RD]); -+ else if ((en & ~eo) & FD_EV_POLLED_R) -+ FD_SET(fd, fd_evts[DIR_RD]); -+ -+ if ((eo & ~en) & FD_EV_POLLED_W) -+ FD_CLR(fd, fd_evts[DIR_WR]); -+ else if ((en & ~eo) & FD_EV_POLLED_W) -+ FD_SET(fd, fd_evts[DIR_WR]); -+ HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock); -+ } -+ } -+ HA_SPIN_LOCK(FD_UPDATE_LOCK, &fd_updt_lock); -+ for (fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) { -+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); -+ if (fdtab[fd].update_mask & tid_bit) { -+ /* Cheat a bit, as the state is global to all pollers -+ * we don't need every thread ot take care of the -+ * update. 
-+ */ -+ fdtab[fd].update_mask &= ~all_threads_mask; -+ done_update_polling(fd); -+ } else { -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); -+ continue; -+ } - -+ fdtab[fd].new = 0; -+ -+ eo = fdtab[fd].state; -+ en = fd_compute_new_polled_status(eo); -+ fdtab[fd].state = en; -+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); - if ((eo ^ en) & FD_EV_POLLED_RW) { - /* poll status changed, update the lists */ - HA_SPIN_LOCK(POLL_LOCK, &poll_lock); -@@ -85,7 +120,9 @@ REGPRM2 static void _do_poll(struct poller *p, int exp) - FD_SET(fd, fd_evts[DIR_WR]); - HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock); - } -+ - } -+ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock); - fd_nbupdt = 0; - - /* let's restore fdset state */ -diff --git a/src/fd.c b/src/fd.c -index b64130ed..a134e93e 100644 ---- a/src/fd.c -+++ b/src/fd.c -@@ -175,9 +175,12 @@ unsigned long fd_cache_mask = 0; // Mask of threads with events in the cache - THREAD_LOCAL int *fd_updt = NULL; // FD updates list - THREAD_LOCAL int fd_nbupdt = 0; // number of updates in the list - -+struct fdlist update_list; // Global update list - __decl_hathreads(HA_SPINLOCK_T fdtab_lock); /* global lock to protect fdtab array */ - __decl_hathreads(HA_RWLOCK_T fdcache_lock); /* global lock to protect fd_cache array */ - __decl_hathreads(HA_SPINLOCK_T poll_lock); /* global lock to protect poll info */ -+__decl_hathreads(HA_SPINLOCK_T) fd_updt_lock; /* global lock to protect the update list */ -+ - - /* Deletes an FD from the fdsets, and recomputes the maxfd limit. - * The file descriptor is also closed. -@@ -341,6 +344,9 @@ int init_pollers() - HA_SPIN_INIT(&fdtab_lock); - HA_RWLOCK_INIT(&fdcache_lock); - HA_SPIN_INIT(&poll_lock); -+ HA_SPIN_INIT(&fd_updt_lock); -+ update_list.first = update_list.last = -1; -+ - do { - bp = NULL; - for (p = 0; p < nbpollers; p++) -diff --git a/src/hathreads.c b/src/hathreads.c -index 944a0d5b..66ed482a 100644 ---- a/src/hathreads.c -+++ b/src/hathreads.c -@@ -31,7 +31,7 @@ void thread_sync_io_handler(int fd) - static HA_SPINLOCK_T sync_lock; - static int threads_sync_pipe[2]; - static unsigned long threads_want_sync = 0; --static unsigned long all_threads_mask = 0; -+unsigned long all_threads_mask = 0; - - #if defined(DEBUG_THREAD) || defined(DEBUG_FULL) - struct lock_stat lock_stats[LOCK_LABELS]; diff --git a/net/haproxy/patches/0018-BUG-MEDIUM-ssl-properly-protect-SSL-cert-generation.patch b/net/haproxy/patches/0018-BUG-MEDIUM-ssl-properly-protect-SSL-cert-generation.patch deleted file mode 100644 index b94bce939..000000000 --- a/net/haproxy/patches/0018-BUG-MEDIUM-ssl-properly-protect-SSL-cert-generation.patch +++ /dev/null @@ -1,45 +0,0 @@ -commit f571613244e4c02ca7aada30c89a6244d09d58d4 -Author: Willy Tarreau -Date: Thu May 17 10:56:47 2018 +0200 - - BUG/MEDIUM: ssl: properly protect SSL cert generation - - Commit 821bb9b ("MAJOR: threads/ssl: Make SSL part thread-safe") added - insufficient locking to the cert lookup and generation code : it uses - lru64_lookup(), which will automatically remove and add a list element - to the LRU list. It cannot be simply read-locked. - - A long-term improvement should consist in using a lockless mechanism - in lru64_lookup() to safely move the list element at the head. For now - let's simply use a write lock during the lookup. The effect will be - minimal since it's used only in conjunction with automatically generated - certificates, which are much more expensive and rarely used. - - This fix must be backported to 1.8. 
- - (cherry picked from commit 03f4ec47d9ffff629b07dcba9f0f134a7c7e44b2) - Signed-off-by: William Lallemand - -diff --git a/src/ssl_sock.c b/src/ssl_sock.c -index 1196d111..9fb2bb15 100644 ---- a/src/ssl_sock.c -+++ b/src/ssl_sock.c -@@ -1812,15 +1812,15 @@ ssl_sock_assign_generated_cert(unsigned int key, struct bind_conf *bind_conf, SS - struct lru64 *lru = NULL; - - if (ssl_ctx_lru_tree) { -- HA_RWLOCK_RDLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); -+ HA_RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); - lru = lru64_lookup(key, ssl_ctx_lru_tree, bind_conf->ca_sign_cert, 0); - if (lru && lru->domain) { - if (ssl) - SSL_set_SSL_CTX(ssl, (SSL_CTX *)lru->data); -- HA_RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); -+ HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); - return (SSL_CTX *)lru->data; - } -- HA_RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); -+ HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); - } - return NULL; - }
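For reference, the reason a read lock is insufficient in the hunk above is that lru64_lookup() re-links the entry it finds, so even a "lookup" writes to the shared list. A standalone sketch of that pattern under pthreads (a toy move-to-front list with illustrative names; this is not HAProxy code):

    #include <pthread.h>
    #include <stddef.h>

    struct node { int key; struct node *prev, *next; };

    static struct node *head;
    static pthread_rwlock_t lru_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Move-to-front lookup: even a "read" re-links the found node, so two
     * callers holding only a shared read lock could corrupt the list. */
    static struct node *lookup_mtf(int key)
    {
        struct node *n;

        for (n = head; n; n = n->next)
            if (n->key == key)
                break;
        if (n && n != head) {
            n->prev->next = n->next;
            if (n->next)
                n->next->prev = n->prev;
            n->prev = NULL;
            n->next = head;
            head->prev = n;
            head = n;
        }
        return n;
    }

    /* Serialize the whole lookup+relink with a write lock, which is what the
     * patch does by replacing HA_RWLOCK_RDLOCK with HA_RWLOCK_WRLOCK. */
    static struct node *lookup_safe(int key)
    {
        struct node *n;

        pthread_rwlock_wrlock(&lru_lock);
        n = lookup_mtf(key);
        pthread_rwlock_unlock(&lru_lock);
        return n;
    }

    int main(void)
    {
        static struct node a = { 1, NULL, NULL }, b = { 2, NULL, NULL };

        head = &a; a.next = &b; b.prev = &a;   /* list: 1 <-> 2 */
        lookup_safe(2);                        /* moves key 2 to the front */
        return head->key == 2 ? 0 : 1;
    }

As the commit message notes, the write lock only costs anything when certificates are generated on the fly, so serializing the lookup is the cheap, safe choice until lru64_lookup() itself becomes lockless.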