haproxy: Update HAProxy to v1.8.9

- Update the HAProxy download URL and hash
- Remove all obsolete patches
- Add logic to the Makefile so the patch version is appended to the HA-Proxy version string only if patches were actually applied (PKG_RELEASE != 00)

Signed-off-by: Christian Lachner <gladiac@gmail.com>
Branch: lilik-openwrt-22.03
Christian Lachner, 7 years ago
Commit: 4ae1c3307a
21 changed files with 9 additions and 2036 deletions
  1. net/haproxy/Makefile (+8 -4)
  2. net/haproxy/get-latest-patches.sh (+1 -1)
  3. net/haproxy/patches/0000-BUG-MINOR-pattern-Add-a-missing-HA_SPIN_INIT-in-pat_ref_newid.patch (+0 -26)
  4. net/haproxy/patches/0001-BUG-MAJOR-channel-Fix-crash-when-trying-to-read-from-a-closed-socket.patch (+0 -87)
  5. net/haproxy/patches/0002-BUG-MINOR-log-t_idle-Ti-is-not-set-for-some-requests.patch (+0 -50)
  6. net/haproxy/patches/0003-BUG-MEDIUM-lua-Fix-segmentation-fault-if-a-Lua-task-exits.patch (+0 -48)
  7. net/haproxy/patches/0004-MINOR-h2-detect-presence-of-CONNECT-and-or-content-length.patch (+0 -147)
  8. net/haproxy/patches/0005-BUG-MEDIUM-h2-implement-missing-support-for-chunked-encoded-uploads.patch (+0 -164)
  9. net/haproxy/patches/0006-BUG-MINOR-lua-threads-Make-luas-tasks-sticky-to-the-current-thread.patch (+0 -36)
  10. net/haproxy/patches/0007-BUG-MINOR-config-disable-http-reuse-on-TCP-proxies.patch (+0 -31)
  11. net/haproxy/patches/0008-BUG-MINOR-checks-Fix-check--health-computation-for-flapping-servers.patch (+0 -42)
  12. net/haproxy/patches/0009-BUG-MEDIUM-threads-Fix-the-sync-point-for-more-than-32-threads.patch (+0 -48)
  13. net/haproxy/patches/0010-BUG-MINOR-lua-Put-tasks-to-sleep-when-waiting-for-data.patch (+0 -31)
  14. net/haproxy/patches/0011-DOC-MINOR-clean-up-LUA-documentation-re-servers-array-table.patch (+0 -252)
  15. net/haproxy/patches/0012-BUG-MINOR-map-correctly-track-reference-to-the-last-ref_elt-being-dumped.patch (+0 -31)
  16. net/haproxy/patches/0013-BUG-MEDIUM-task-Dont-free-a-task-that-is-about-to-be-run.patch (+0 -128)
  17. net/haproxy/patches/0014-BUG-MINOR-lua-schedule-socket-task-upon-lua-connect.patch (+0 -48)
  18. net/haproxy/patches/0015-BUG-MINOR-lua-ensure-large-proxy-IDs-can-be-represented.patch (+0 -38)
  19. net/haproxy/patches/0016-BUG-MEDIUM-http-dont-always-abort-transfers-on-CF_SHUTR.patch (+0 -70)
  20. net/haproxy/patches/0017-BUG-MEDIUM-pollers-Use-a-global-list-for-fd-shared-between-threads.patch (+0 -709)
  21. net/haproxy/patches/0018-BUG-MEDIUM-ssl-properly-protect-SSL-cert-generation.patch (+0 -45)

net/haproxy/Makefile (+8 -4)

@@ -9,17 +9,21 @@
 include $(TOPDIR)/rules.mk
 PKG_NAME:=haproxy
-PKG_VERSION:=1.8.8
-PKG_RELEASE:=05
+PKG_VERSION:=1.8.9
+PKG_RELEASE:=00
 PKG_SOURCE:=haproxy-$(PKG_VERSION).tar.gz
 PKG_SOURCE_URL:=https://www.haproxy.org/download/1.8/src/
-PKG_HASH:=bcc05ab824bd2f89b8b21ac05459c0a0a0e02247b57ffe441d52cfe771daea92
+PKG_HASH:=436b77927cd85bcd4c2cb3cbf7fb539a5362d9686fdcfa34f37550ca1f5db102
 PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION)
 PKG_LICENSE:=GPL-2.0
 MAINTAINER:=Thomas Heil <heil@terminal-consulting.de>
+ifneq ($(PKG_RELEASE),00)
+BUILD_VERSION:=-patch$(PKG_RELEASE)
+endif
 include $(INCLUDE_DIR)/package.mk
 define Package/haproxy/Default
@@ -143,7 +147,7 @@ define Build/Compile
 SMALL_OPTS="-DBUFSIZE=16384 -DMAXREWRITE=1030 -DSYSTEM_MAXCONN=165530 " \
 USE_LINUX_TPROXY=1 USE_LINUX_SPLICE=1 USE_TFO=1 \
 USE_ZLIB=yes USE_PCRE=1 USE_PCRE_JIT=1 USE_GETADDRINFO=1 \
-VERSION="$(PKG_VERSION)-patch$(PKG_RELEASE)" \
+VERSION="$(PKG_VERSION)$(BUILD_VERSION)" \
 $(ADDON) \
 CFLAGS="$(TARGET_CFLAGS)" \
 LD="$(TARGET_CC)" \


net/haproxy/get-latest-patches.sh (+1 -1)

@@ -1,7 +1,7 @@
 #!/bin/bash
 CLONEURL=http://git.haproxy.org/git/haproxy-1.8.git
-BASE_TAG=v1.8.8
+BASE_TAG=v1.8.9
 TMP_REPODIR=tmprepo
 PATCHESDIR=patches


net/haproxy/patches/0000-BUG-MINOR-pattern-Add-a-missing-HA_SPIN_INIT-in-pat_ref_newid.patch (+0 -26)

@@ -1,26 +0,0 @@
commit 6c9efc8219e35f4eb17e94b364f4c371cfb56cca
Author: Aurélien Nephtali <aurelien.nephtali@corp.ovh.com>
Date: Thu Apr 19 16:56:07 2018 +0200
BUG/MINOR: pattern: Add a missing HA_SPIN_INIT() in pat_ref_newid()
pat_ref_newid() is lacking a spinlock init. It was probably forgotten
in b5997f740b ("MAJOR: threads/map: Make acls/maps thread safe").
Signed-off-by: Aurélien Nephtali <aurelien.nephtali@corp.ovh.com>
(cherry picked from commit 564d15a71ecb3ae3372767866335cfbc068c4b48)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
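
The fix itself is a one-liner, but the rule behind it is worth spelling out. Below is a minimal standalone analogue of the bug (plain pthreads rather than HAProxy's HA_SPIN_* wrappers; all names are illustrative): every field of a shared object, including its lock, must be initialized before the object is linked where other threads can see it.

#include <pthread.h>
#include <stdlib.h>

struct ref {
    pthread_spinlock_t lock;
    struct ref *next;
};

static struct ref *ref_new(struct ref **head)
{
    struct ref *r = calloc(1, sizeof(*r));

    if (!r)
        return NULL;
    /* the step pat_ref_newid() was missing: initialize the lock... */
    pthread_spin_init(&r->lock, PTHREAD_PROCESS_PRIVATE);
    /* ...before publishing the object on the globally visible list */
    r->next = *head;
    *head = r;
    return r;
}
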
diff --git a/src/pattern.c b/src/pattern.c
index fe672f12..2eb82650 100644
--- a/src/pattern.c
+++ b/src/pattern.c
@@ -1906,7 +1906,7 @@ struct pat_ref *pat_ref_newid(int unique_id, const char *display, unsigned int f
ref->unique_id = unique_id;
LIST_INIT(&ref->head);
LIST_INIT(&ref->pat);
-
+ HA_SPIN_INIT(&ref->lock);
LIST_ADDQ(&pattern_reference, &ref->list);
return ref;

net/haproxy/patches/0001-BUG-MAJOR-channel-Fix-crash-when-trying-to-read-from-a-closed-socket.patch (+0 -87)

@@ -1,87 +0,0 @@
commit e0f6d4a4e8696140d1fcff812fb287d534d702e9
Author: Tim Duesterhus <tim@bastelstu.be>
Date: Tue Apr 24 19:20:43 2018 +0200
BUG/MAJOR: channel: Fix crash when trying to read from a closed socket
When haproxy is compiled using GCC <= 3.x or >= 5.x the `unlikely`
macro performs a comparison with zero: `(x) != 0`, thus returning
either 0 or 1.
In `int co_getline_nc()` this macro was accidentally applied to
the variable `retcode` itself, instead of to the result of the
comparison `retcode <= 0`. As a result any negative `retcode`
is converted to `1` for the purposes of the comparison, so the
branch (which exits the function) is never taken for negative
values.
This in turn leads to reads of uninitialized memory in the for-loop
below:
==12141== Conditional jump or move depends on uninitialised value(s)
==12141== at 0x4EB6B4: co_getline_nc (channel.c:346)
==12141== by 0x421CA4: hlua_socket_receive_yield (hlua.c:1713)
==12141== by 0x421F6F: hlua_socket_receive (hlua.c:1896)
==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x529B497: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x529711A: lua_pcallk (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x52ABDF0: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x529A9F1: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x529B523: lua_resume (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141==
==12141== Use of uninitialised value of size 8
==12141== at 0x4EB6B9: co_getline_nc (channel.c:346)
==12141== by 0x421CA4: hlua_socket_receive_yield (hlua.c:1713)
==12141== by 0x421F6F: hlua_socket_receive (hlua.c:1896)
==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x529B497: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x529711A: lua_pcallk (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x52ABDF0: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x529A9F1: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x529B523: lua_resume (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141==
==12141== Invalid read of size 1
==12141== at 0x4EB6B9: co_getline_nc (channel.c:346)
==12141== by 0x421CA4: hlua_socket_receive_yield (hlua.c:1713)
==12141== by 0x421F6F: hlua_socket_receive (hlua.c:1896)
==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x529B497: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x529711A: lua_pcallk (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x52ABDF0: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x529B08F: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x52A7EFC: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x529A9F1: ??? (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== by 0x529B523: lua_resume (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==12141== Address 0x8637171e928bb500 is not stack'd, malloc'd or (recently) free'd
Fix this bug by correctly applying the `unlikely` macro to the result of the comparison.
This bug exists as of commit ca16b038132444dea06e6d83953034128a812bce
which is the first commit adding this function.
v1.6-dev1 is the first tag containing this commit, the fix should
be backported to haproxy 1.6 and newer.
(cherry picked from commit 45be38c9c7ba2b20806f2b887876db4fb5b9457c)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
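
To make the misplaced parenthesis concrete, here is a small sketch based on the macro definition described above (the exact HAProxy definition varies by compiler version):

#define unlikely(x) (__builtin_expect((x) != 0, 0))

static int co_getline_sketch(int retcode)
{
    /* buggy form:  if (unlikely(retcode) <= 0) ...
     *   expands to __builtin_expect(retcode != 0, 0) <= 0, i.e. the 0-or-1
     *   truth value is compared with 0; retcode == -1 gives 1 <= 0 (false),
     *   so the early return below is never taken for negative values.
     * fixed form:  if (unlikely(retcode <= 0)) ...
     *   tests the comparison itself, so retcode == -1 returns early. */
    if (unlikely(retcode <= 0))
        return retcode;
    return 1; /* continue parsing */
}
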
diff --git a/src/channel.c b/src/channel.c
index bd5c4de0..3770502c 100644
--- a/src/channel.c
+++ b/src/channel.c
@@ -340,7 +340,7 @@ int co_getline_nc(const struct channel *chn,
int l;
retcode = co_getblk_nc(chn, blk1, len1, blk2, len2);
- if (unlikely(retcode) <= 0)
+ if (unlikely(retcode <= 0))
return retcode;
for (l = 0; l < *len1 && (*blk1)[l] != '\n'; l++);

net/haproxy/patches/0002-BUG-MINOR-log-t_idle-Ti-is-not-set-for-some-requests.patch (+0 -50)

@@ -1,50 +0,0 @@
commit 0e645ba57ddff9163a3d9b5626f189e974e671bd
Author: Rian McGuire <rian.mcguire@stileeducation.com>
Date: Tue Apr 24 11:19:21 2018 -0300
BUG/MINOR: log: t_idle (%Ti) is not set for some requests
If TCP content inspection is used, msg_state can be >= HTTP_MSG_ERROR
the first time http_wait_for_request is called. t_idle was being left
unset in that case.
In the example below :
stick-table type string len 64 size 100k expire 60s
tcp-request inspect-delay 1s
tcp-request content track-sc1 hdr(X-Session)
%Ti will always be -1, because the msg_state is already at HTTP_MSG_BODY
when http_wait_for_request is called for the first time.
This patch should be backported to 1.8 and 1.7.
(cherry picked from commit 89fcb7d929283e904cabad58de495d62fc753da2)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/proto_http.c b/src/proto_http.c
index b38dd84f..4c18a27c 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -1618,18 +1618,16 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
/* we're speaking HTTP here, so let's speak HTTP to the client */
s->srv_error = http_return_srv_error;
+ /* If there is data available for analysis, log the end of the idle time. */
+ if (buffer_not_empty(req->buf) && s->logs.t_idle == -1)
+ s->logs.t_idle = tv_ms_elapsed(&s->logs.tv_accept, &now) - s->logs.t_handshake;
+
/* There's a protected area at the end of the buffer for rewriting
* purposes. We don't want to start to parse the request if the
* protected area is affected, because we may have to move processed
* data later, which is much more complicated.
*/
if (buffer_not_empty(req->buf) && msg->msg_state < HTTP_MSG_ERROR) {
-
- /* This point is executed when some data is avalaible for analysis,
- * so we log the end of the idle time. */
- if (s->logs.t_idle == -1)
- s->logs.t_idle = tv_ms_elapsed(&s->logs.tv_accept, &now) - s->logs.t_handshake;
-
if (txn->flags & TX_NOT_FIRST) {
if (unlikely(!channel_is_rewritable(req))) {
if (req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))

net/haproxy/patches/0003-BUG-MEDIUM-lua-Fix-segmentation-fault-if-a-Lua-task-exits.patch (+0 -48)

@@ -1,48 +0,0 @@
commit 17f3e16826e5b1a3f79b7421d69bb85be09a4ad9
Author: Tim Duesterhus <tim@bastelstu.be>
Date: Tue Apr 24 13:56:01 2018 +0200
BUG/MEDIUM: lua: Fix segmentation fault if a Lua task exits
PiBa-NL reported that haproxy crashes with a segmentation fault
if a function registered using `core.register_task` returns.
An example Lua script that reproduces the bug is:
mytask = function()
core.Info("Stopping task")
end
core.register_task(mytask)
The Valgrind output is as follows:
==6759== Process terminating with default action of signal 11 (SIGSEGV)
==6759== Access not within mapped region at address 0x20
==6759== at 0x5B60AA9: lua_sethook (in /usr/lib/x86_64-linux-gnu/liblua5.3.so.0.0.0)
==6759== by 0x430264: hlua_ctx_resume (hlua.c:1009)
==6759== by 0x43BB68: hlua_process_task (hlua.c:5525)
==6759== by 0x4FED0A: process_runnable_tasks (task.c:231)
==6759== by 0x4B2256: run_poll_loop (haproxy.c:2397)
==6759== by 0x4B2256: run_thread_poll_loop (haproxy.c:2459)
==6759== by 0x41A7E4: main (haproxy.c:3049)
Add the missing `task = NULL` for the `HLUA_E_OK` case. The error cases
have been fixed as of 253e53e661c49fb9723535319cf511152bf09bc7 which
first was included in haproxy v1.8-dev3. This bugfix should be backported
to haproxy 1.8.
(cherry picked from commit cd235c60425dbe66c9015a357369afacc4880211)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/hlua.c b/src/hlua.c
index 4e759c7c..d4b7ce91 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -5528,6 +5528,7 @@ static struct task *hlua_process_task(struct task *task)
hlua_ctx_destroy(hlua);
task_delete(task);
task_free(task);
+ task = NULL;
break;
case HLUA_E_AGAIN: /* co process or timeout wake me later. */

net/haproxy/patches/0004-MINOR-h2-detect-presence-of-CONNECT-and-or-content-length.patch (+0 -147)

@@ -1,147 +0,0 @@
commit a8bcc7dd3fe5aa615f21e795375ff9225f004498
Author: Willy Tarreau <w@1wt.eu>
Date: Wed Apr 25 18:13:58 2018 +0200
MINOR: h2: detect presence of CONNECT and/or content-length
We'll need this in order to support uploading chunks. The h2 to h1
converter checks for the presence of the content-length header field
as well as the CONNECT method and returns this information to the
caller. The caller indicates whether or not a body is detected for
the message (presence of END_STREAM or not). No transfer-encoding
header is emitted yet.
(cherry picked from commit 174b06a572ef141f15d8b7ea64eb6b34ec4c9af1)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
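
The framing decision these flags feed into can be condensed to a few lines of C (flag values taken from the diff below; the helper name is illustrative, not part of the patch):

#define H2_MSGF_BODY        0x0001  /* HEADERS frame did not carry END_STREAM */
#define H2_MSGF_BODY_CL     0x0002  /* a content-length header was found */
#define H2_MSGF_BODY_TUNNEL 0x0004  /* CONNECT method: data is tunnelled */

/* how the h2-to-h1 converter ends up framing the request body */
static const char *h1_framing(unsigned int msgf)
{
    if (!(msgf & H2_MSGF_BODY))
        return "no body";
    if (msgf & H2_MSGF_BODY_TUNNEL)
        return "tunnel (CONNECT, no framing)";
    if (msgf & H2_MSGF_BODY_CL)
        return "content-length";
    return "chunked"; /* implemented by the next patch in this series */
}
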
diff --git a/include/common/h2.h b/include/common/h2.h
index 65c5ab1c..576ed105 100644
--- a/include/common/h2.h
+++ b/include/common/h2.h
@@ -145,9 +145,15 @@ enum h2_err {
"\x0d\x0a\x53\x4d\x0d\x0a\x0d\x0a"
+/* some flags related to protocol parsing */
+#define H2_MSGF_BODY 0x0001 // a body is present
+#define H2_MSGF_BODY_CL 0x0002 // content-length is present
+#define H2_MSGF_BODY_TUNNEL 0x0004 // a tunnel is in use (CONNECT)
+
+
/* various protocol processing functions */
-int h2_make_h1_request(struct http_hdr *list, char *out, int osize);
+int h2_make_h1_request(struct http_hdr *list, char *out, int osize, unsigned int *msgf);
/*
* Some helpful debugging functions.
diff --git a/src/h2.c b/src/h2.c
index 43ed7f3c..7d9ddd50 100644
--- a/src/h2.c
+++ b/src/h2.c
@@ -36,9 +36,10 @@
* stored in <phdr[]>. <fields> indicates what was found so far. This should be
* called once at the detection of the first general header field or at the end
* of the request if no general header field was found yet. Returns 0 on success
- * or a negative error code on failure.
+ * or a negative error code on failure. Upon success, <msgf> is updated with a
+ * few H2_MSGF_* flags indicating what was found while parsing.
*/
-static int h2_prepare_h1_reqline(uint32_t fields, struct ist *phdr, char **ptr, char *end)
+static int h2_prepare_h1_reqline(uint32_t fields, struct ist *phdr, char **ptr, char *end, unsigned int *msgf)
{
char *out = *ptr;
int uri_idx = H2_PHDR_IDX_PATH;
@@ -62,6 +63,7 @@ static int h2_prepare_h1_reqline(uint32_t fields, struct ist *phdr, char **ptr,
}
// otherwise OK ; let's use the authority instead of the URI
uri_idx = H2_PHDR_IDX_AUTH;
+ *msgf |= H2_MSGF_BODY_TUNNEL;
}
else if ((fields & (H2_PHDR_FND_METH|H2_PHDR_FND_SCHM|H2_PHDR_FND_PATH)) !=
(H2_PHDR_FND_METH|H2_PHDR_FND_SCHM|H2_PHDR_FND_PATH)) {
@@ -113,6 +115,10 @@ static int h2_prepare_h1_reqline(uint32_t fields, struct ist *phdr, char **ptr,
* for a max of <osize> bytes, and the amount of bytes emitted is returned. In
* case of error, a negative error code is returned.
*
+ * Upon success, <msgf> is filled with a few H2_MSGF_* flags indicating what
+ * was found while parsing. The caller must set it to zero in or H2_MSGF_BODY
+ * if a body is detected (!ES).
+ *
* The headers list <list> must be composed of :
* - n.name != NULL, n.len > 0 : literal header name
* - n.name == NULL, n.len > 0 : indexed pseudo header name number <n.len>
@@ -124,7 +130,7 @@ static int h2_prepare_h1_reqline(uint32_t fields, struct ist *phdr, char **ptr,
* The Cookie header will be reassembled at the end, and for this, the <list>
* will be used to create a linked list, so its contents may be destroyed.
*/
-int h2_make_h1_request(struct http_hdr *list, char *out, int osize)
+int h2_make_h1_request(struct http_hdr *list, char *out, int osize, unsigned int *msgf)
{
struct ist phdr_val[H2_PHDR_NUM_ENTRIES];
char *out_end = out + osize;
@@ -176,7 +182,7 @@ int h2_make_h1_request(struct http_hdr *list, char *out, int osize)
/* regular header field in (name,value) */
if (!(fields & H2_PHDR_FND_NONE)) {
/* no more pseudo-headers, time to build the request line */
- ret = h2_prepare_h1_reqline(fields, phdr_val, &out, out_end);
+ ret = h2_prepare_h1_reqline(fields, phdr_val, &out, out_end, msgf);
if (ret != 0)
goto leave;
fields |= H2_PHDR_FND_NONE;
@@ -185,6 +191,10 @@ int h2_make_h1_request(struct http_hdr *list, char *out, int osize)
if (isteq(list[idx].n, ist("host")))
fields |= H2_PHDR_FND_HOST;
+ if ((*msgf & (H2_MSGF_BODY|H2_MSGF_BODY_TUNNEL|H2_MSGF_BODY_CL)) == H2_MSGF_BODY &&
+ isteq(list[idx].n, ist("content-length")))
+ *msgf |= H2_MSGF_BODY_CL;
+
/* these ones are forbidden in requests (RFC7540#8.1.2.2) */
if (isteq(list[idx].n, ist("connection")) ||
isteq(list[idx].n, ist("proxy-connection")) ||
@@ -232,7 +242,7 @@ int h2_make_h1_request(struct http_hdr *list, char *out, int osize)
/* Let's dump the request now if not yet emitted. */
if (!(fields & H2_PHDR_FND_NONE)) {
- ret = h2_prepare_h1_reqline(fields, phdr_val, &out, out_end);
+ ret = h2_prepare_h1_reqline(fields, phdr_val, &out, out_end, msgf);
if (ret != 0)
goto leave;
}
diff --git a/src/mux_h2.c b/src/mux_h2.c
index 4fde7fcc..82dd414a 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -2626,6 +2626,7 @@ static int h2_frt_decode_headers(struct h2s *h2s, struct buffer *buf, int count)
struct chunk *tmp = get_trash_chunk();
struct http_hdr list[MAX_HTTP_HDR * 2];
struct chunk *copy = NULL;
+ unsigned int msgf;
int flen = h2c->dfl;
int outlen = 0;
int wrap;
@@ -2727,13 +2728,22 @@ static int h2_frt_decode_headers(struct h2s *h2s, struct buffer *buf, int count)
}
/* OK now we have our header list in <list> */
- outlen = h2_make_h1_request(list, bi_end(buf), try);
+ msgf = (h2c->dff & H2_F_DATA_END_STREAM) ? 0 : H2_MSGF_BODY;
+ outlen = h2_make_h1_request(list, bi_end(buf), try, &msgf);
if (outlen < 0) {
h2c_error(h2c, H2_ERR_COMPRESSION_ERROR);
goto fail;
}
+ if (msgf & H2_MSGF_BODY) {
+ /* a payload is present */
+ if (msgf & H2_MSGF_BODY_CL)
+ h2s->flags |= H2_SF_DATA_CLEN;
+ else if (!(msgf & H2_MSGF_BODY_TUNNEL))
+ h2s->flags |= H2_SF_DATA_CHNK;
+ }
+
/* now consume the input data */
bi_del(h2c->dbuf, h2c->dfl);
h2c->st0 = H2_CS_FRAME_H;

net/haproxy/patches/0005-BUG-MEDIUM-h2-implement-missing-support-for-chunked-encoded-uploads.patch (+0 -164)

@@ -1,164 +0,0 @@
commit 05657bd24ebaf20e5c508a435be9a0830591f033
Author: Willy Tarreau <w@1wt.eu>
Date: Wed Apr 25 20:44:22 2018 +0200
BUG/MEDIUM: h2: implement missing support for chunked encoded uploads
Upload requests not carrying a content-length nor tunnelling data must
be sent chunked-encoded over HTTP/1. The code was planned but for some
reason forgotten during the implementation, leading to such payloads
being sent as tunnelled data.
Browsers always emit a content length in uploads so this problem doesn't
happen for most sites. However some applications may send data frames
after a request without indicating it earlier.
The only way to detect that a client will need to send data is that the
HEADERS frame doesn't hold the ES bit. In this case it's wise to look
for the content-length header. If it's not there, either we're in tunnel
(CONNECT method) or chunked-encoding (other methods).
This patch implements this.
The following request is sent using content-length :
curl --http2 -sk https://127.0.0.1:4443/s2 -XPOST -T /large/file
and these ones using chunked-encoding :
curl --http2 -sk https://127.0.0.1:4443/s2 -XPUT -T /large/file
curl --http2 -sk https://127.0.0.1:4443/s2 -XPUT -T - < /dev/urandom
Thanks to Robert Samuel Newson for raising this issue with details.
This fix must be backported to 1.8.
(cherry picked from commit eba10f24b7da27cde60d2db24aeb1147e1657579)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
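
For reference, the chunked framing the patch has to produce looks like this (a standalone sketch, not HAProxy's buffer API; emit_chunk is an illustrative helper): hex size, CRLF, payload, CRLF, and a final "0\r\n\r\n" once END_STREAM is seen.

#include <stdio.h>
#include <string.h>

/* wrap one block of payload as an HTTP/1.1 chunk; the caller emits the
 * 5-byte trailer "0\r\n\r\n" when the upload is finished */
static size_t emit_chunk(char *out, const char *data, size_t len)
{
    size_t n = (size_t)sprintf(out, "%zx\r\n", len); /* chunk-size line */

    memcpy(out + n, data, len);
    memcpy(out + n + len, "\r\n", 2);
    return n + len + 2;
}
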
diff --git a/src/h2.c b/src/h2.c
index 7d9ddd50..5c83d6b6 100644
--- a/src/h2.c
+++ b/src/h2.c
@@ -262,6 +262,14 @@ int h2_make_h1_request(struct http_hdr *list, char *out, int osize, unsigned int
*(out++) = '\n';
}
+ if ((*msgf & (H2_MSGF_BODY|H2_MSGF_BODY_TUNNEL|H2_MSGF_BODY_CL)) == H2_MSGF_BODY) {
+ /* add chunked encoding */
+ if (out + 28 > out_end)
+ goto fail;
+ memcpy(out, "transfer-encoding: chunked\r\n", 28);
+ out += 28;
+ }
+
/* now we may have to build a cookie list. We'll dump the values of all
* visited headers.
*/
diff --git a/src/mux_h2.c b/src/mux_h2.c
index 82dd414a..5f1da0df 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -2785,6 +2785,7 @@ static int h2_frt_transfer_data(struct h2s *h2s, struct buffer *buf, int count)
struct h2c *h2c = h2s->h2c;
int block1, block2;
unsigned int flen = h2c->dfl;
+ unsigned int chklen = 0;
h2s->cs->flags &= ~CS_FL_RCV_MORE;
h2c->flags &= ~H2_CF_DEM_SFULL;
@@ -2820,14 +2821,35 @@ static int h2_frt_transfer_data(struct h2s *h2s, struct buffer *buf, int count)
return 0;
}
+ /* chunked-encoding requires more room */
+ if (h2s->flags & H2_SF_DATA_CHNK) {
+ chklen = MIN(flen, count);
+ chklen = (chklen < 16) ? 1 : (chklen < 256) ? 2 :
+ (chklen < 4096) ? 3 : (chklen < 65536) ? 4 :
+ (chklen < 1048576) ? 4 : 8;
+ chklen += 4; // CRLF, CRLF
+ }
+
/* does it fit in output buffer or should we wait ? */
- if (flen > count) {
- flen = count;
- if (!flen) {
- h2c->flags |= H2_CF_DEM_SFULL;
- h2s->cs->flags |= CS_FL_RCV_MORE;
- return 0;
- }
+ if (flen + chklen > count) {
+ if (chklen >= count)
+ goto full;
+ flen = count - chklen;
+ }
+
+ if (h2s->flags & H2_SF_DATA_CHNK) {
+ /* emit the chunk size */
+ unsigned int chksz = flen;
+ char str[10];
+ char *beg;
+
+ beg = str + sizeof(str);
+ *--beg = '\n';
+ *--beg = '\r';
+ do {
+ *--beg = hextab[chksz & 0xF];
+ } while (chksz >>= 4);
+ bi_putblk(buf, beg, str + sizeof(str) - beg);
}
/* Block1 is the length of the first block before the buffer wraps,
@@ -2844,6 +2866,11 @@ static int h2_frt_transfer_data(struct h2s *h2s, struct buffer *buf, int count)
if (block2)
bi_putblk(buf, b_ptr(h2c->dbuf, block1), block2);
+ if (h2s->flags & H2_SF_DATA_CHNK) {
+ /* emit the CRLF */
+ bi_putblk(buf, "\r\n", 2);
+ }
+
/* now mark the input data as consumed (will be deleted from the buffer
* by the caller when seeing FRAME_A after sending the window update).
*/
@@ -2854,15 +2881,22 @@ static int h2_frt_transfer_data(struct h2s *h2s, struct buffer *buf, int count)
if (h2c->dfl > h2c->dpl) {
/* more data available, transfer stalled on stream full */
- h2c->flags |= H2_CF_DEM_SFULL;
- h2s->cs->flags |= CS_FL_RCV_MORE;
- return flen;
+ goto more;
}
end_transfer:
/* here we're done with the frame, all the payload (except padding) was
* transferred.
*/
+
+ if (h2c->dff & H2_F_DATA_END_STREAM && h2s->flags & H2_SF_DATA_CHNK) {
+ /* emit the trailing 0 CRLF CRLF */
+ if (count < 5)
+ goto more;
+ chklen += 5;
+ bi_putblk(buf, "0\r\n\r\n", 5);
+ }
+
h2c->rcvd_c += h2c->dpl;
h2c->rcvd_s += h2c->dpl;
h2c->dpl = 0;
@@ -2877,7 +2911,13 @@ static int h2_frt_transfer_data(struct h2s *h2s, struct buffer *buf, int count)
h2s->flags |= H2_SF_ES_RCVD;
}
- return flen;
+ return flen + chklen;
+ full:
+ flen = chklen = 0;
+ more:
+ h2c->flags |= H2_CF_DEM_SFULL;
+ h2s->cs->flags |= CS_FL_RCV_MORE;
+ return flen + chklen;
}
/*

net/haproxy/patches/0006-BUG-MINOR-lua-threads-Make-luas-tasks-sticky-to-the-current-thread.patch (+0 -36)

@@ -1,36 +0,0 @@
commit 8b8d55be7e94ee3d758d41a21fa86a036e91a264
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Wed Apr 25 10:34:45 2018 +0200
BUG/MINOR: lua/threads: Make lua's tasks sticky to the current thread
PiBa-NL reported a bug with tasks registered in Lua when HAProxy is started with
several threads. These tasks have no specific affinity with threads, so they
can be woken up on any thread. This makes it impossible for them to handle
cosockets or applets, because cosockets and applets are sticky to the thread
which created them: it is forbidden to manipulate a cosocket from another
thread.
So to fix the bug, tasks registered in Lua are now sticky to the current
thread. Because these tasks can be registered before thread creation, the
affinity is set the first time a Lua task is processed.
This patch must be backported in HAProxy 1.8.
(cherry picked from commit 5bc9972ed836517924eea91954d255d317a53418)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/hlua.c b/src/hlua.c
index d4b7ce91..bd0b87e3 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -5513,6 +5513,9 @@ static struct task *hlua_process_task(struct task *task)
struct hlua *hlua = task->context;
enum hlua_exec status;
+ if (task->thread_mask == MAX_THREADS_MASK)
+ task_set_affinity(task, tid_bit);
+
/* If it is the first call to the task, we must initialize the
* execution timeouts.
*/

net/haproxy/patches/0007-BUG-MINOR-config-disable-http-reuse-on-TCP-proxies.patch (+0 -31)

@@ -1,31 +0,0 @@
commit 80e179128cfd78d95cdebf7195fd21299e7931b6
Author: Willy Tarreau <w@1wt.eu>
Date: Sat Apr 28 07:18:15 2018 +0200
BUG/MINOR: config: disable http-reuse on TCP proxies
Louis Chanouha reported an inappropriate warning when http-reuse is
present in a defaults section while a TCP proxy accidentally inherits
it and finds a conflict with other options like the use of the PROXY
protocol. To fix this, the patch removes the http-reuse option for TCP
proxies.
This fix needs to be backported to 1.8, 1.7 and possibly 1.6.
(cherry picked from commit 46deab6e64bfda7211b7c3199ad01f136141c86f)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/cfgparse.c b/src/cfgparse.c
index 5a460381..63d2de58 100644
--- a/src/cfgparse.c
+++ b/src/cfgparse.c
@@ -8702,6 +8702,9 @@ out_uri_auth_compat:
}
#endif
+ if ((curproxy->mode != PR_MODE_HTTP) && (curproxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR)
+ curproxy->options &= ~PR_O_REUSE_MASK;
+
if ((curproxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) {
if ((curproxy->conn_src.opts & CO_SRC_TPROXY_MASK) == CO_SRC_TPROXY_CLI ||
(curproxy->conn_src.opts & CO_SRC_TPROXY_MASK) == CO_SRC_TPROXY_CIP ||

net/haproxy/patches/0008-BUG-MINOR-checks-Fix-check--health-computation-for-flapping-servers.patch (+0 -42)

@@ -1,42 +0,0 @@
commit edb5a1efd22eb9918574d962640cd2ae3bb45ad3
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Wed May 2 12:12:45 2018 +0200
BUG/MINOR: checks: Fix check->health computation for flapping servers
This patch fixes an old bug introduced in the commit 7b1d47ce ("MAJOR: checks:
move health checks changes to set_server_check_status()"). When a DOWN server is
flapping, every time a check succeeds, check->health is incremented. But when a
check fails, it is decremented only when it is higher than the rise value. So if
only one check succeeds for a DOWN server, check->health will remain set to 1 for
all subsequent failing checks.
At first glance this seems not that terrible, because the server remains
DOWN. But it is reported in the transitional state "DOWN server, going up", and
it will remain in this state until it is UP again. There is also an
insidious side effect: if a DOWN server flaps from time to time, it will end up
being considered UP after a single successful check, regardless of the rise
threshold, because check->health is increased slowly and never decreased.
To fix the bug, we just need to reset check->health to 0 when a check fails for
a DOWN server. To do so, we just need to relax the condition to handle a failure
in the function set_server_check_status.
This patch must be backported to haproxy 1.5 and newer.
(cherry picked from commit b119a79fc336f2b6074de1c3113b1682c717985c)
Signed-off-by: Willy Tarreau <w@1wt.eu>
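
A sketch of the accounting described above (a deliberately simplified model, not the real set_server_check_status(); the fixed flag toggles the patched condition):

struct check_sketch { int health, rise; };

/* one health-check result for a DOWN server */
static void on_check_result(struct check_sketch *c, int ok, int fixed)
{
    if (ok)
        c->health++;                         /* "DOWN server, going up" */
    else if (fixed ? (c->health > 0)         /* patched: always drop back */
                   : (c->health >= c->rise)) /* old: 0 < health < rise sticks */
        c->health--;
}

With the old condition, a single success leaves health stuck at 1 through any number of failures, and occasional successes slowly ratchet it up to the rise value.
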
diff --git a/src/checks.c b/src/checks.c
index 80a9c70d..d07a82f8 100644
--- a/src/checks.c
+++ b/src/checks.c
@@ -243,7 +243,7 @@ static void set_server_check_status(struct check *check, short status, const cha
*/
if ((!(check->state & CHK_ST_AGENT) ||
(check->status >= HCHK_STATUS_L57DATA)) &&
- (check->health >= check->rise)) {
+ (check->health > 0)) {
HA_ATOMIC_ADD(&s->counters.failed_checks, 1);
report = 1;
check->health--;

net/haproxy/patches/0009-BUG-MEDIUM-threads-Fix-the-sync-point-for-more-than-32-threads.patch (+0 -48)

@@ -1,48 +0,0 @@
commit 830324444e57c042666b17ac4584352cca85dafd
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Wed May 2 16:58:40 2018 +0200
BUG/MEDIUM: threads: Fix the sync point for more than 32 threads
In the sync point, to know if a thread has requested a synchronization, we call
the function thread_need_sync(). It should return 1 if yes, otherwise it should
return 0. It is intended to return a signed integer.
But internally, instead of returning 0 or 1, it returns 0 or tid_bit
(threads_want_sync & tid_bit). So tid_bit is cast to an integer. For the first
32 threads this is fine, because we always check whether thread_need_sync()
returns something other than 0. But it is a problem if HAProxy is started with
more than 32 threads, because for threads 33 to 64 (so for tid 32 to 63),
tid_bit cast to an integer evaluates to 0. So the sync point does not work for
more than 32 threads.
Now the function thread_need_sync() respects its contract, returning 0 or
1. The function thread_no_sync() has also been updated to avoid any ambiguity.
This patch must be backported in HAProxy 1.8.
(cherry picked from commit 148b16e1ceb819dfcef4c45828121d9cd7474b35)
Signed-off-by: Willy Tarreau <w@1wt.eu>
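
The truncation is easy to demonstrate in isolation (standalone sketch assuming a 64-bit unsigned long; variable names mirror the diff below):

#include <stdio.h>

int main(void)
{
    unsigned long threads_want_sync = ~0UL; /* every thread asked to sync */
    unsigned long tid_bit = 1UL << 32;      /* the 33rd thread (tid 32) */

    /* old return path: the unsigned long shrinks to int, dropping bit 32 */
    int old = (threads_want_sync & tid_bit);
    /* fixed return path: compare first, so the result is exactly 0 or 1 */
    int fixed = ((threads_want_sync & tid_bit) != 0UL);

    printf("old=%d fixed=%d\n", old, fixed); /* prints: old=0 fixed=1 */
    return 0;
}
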
diff --git a/src/hathreads.c b/src/hathreads.c
index daf226ce..944a0d5b 100644
--- a/src/hathreads.c
+++ b/src/hathreads.c
@@ -85,7 +85,7 @@ void thread_want_sync()
/* Returns 1 if no thread has requested a sync. Otherwise, it returns 0. */
int thread_no_sync()
{
- return (threads_want_sync == 0);
+ return (threads_want_sync == 0UL);
}
/* Returns 1 if the current thread has requested a sync. Otherwise, it returns
@@ -93,7 +93,7 @@ int thread_no_sync()
*/
int thread_need_sync()
{
- return (threads_want_sync & tid_bit);
+ return ((threads_want_sync & tid_bit) != 0UL);
}
/* Thread barrier. Synchronizes all threads at the barrier referenced by

net/haproxy/patches/0010-BUG-MINOR-lua-Put-tasks-to-sleep-when-waiting-for-data.patch (+0 -31)

@@ -1,31 +0,0 @@
commit 335bc7b74eee84f0a3bcb615cadd23fe01d1336c
Author: PiBa-NL <PiBa.NL.dev@gmail.com>
Date: Wed May 2 22:27:14 2018 +0200
BUG/MINOR: lua: Put tasks to sleep when waiting for data
If a Lua socket is waiting for data, it currently spins at 100% CPU usage.
This is because the TICK_ETERNITY returned by the socket is ignored when
setting the 'expire' time of the task.
Fixed by removing the check for yields that return TICK_ETERNITY.
This should be backported to at least 1.8.
(cherry picked from commit fe971b35aeca9994f3823112c783aa796e74075a)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/hlua.c b/src/hlua.c
index bd0b87e3..0100e7cf 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -5536,8 +5536,7 @@ static struct task *hlua_process_task(struct task *task)
case HLUA_E_AGAIN: /* co process or timeout wake me later. */
notification_gc(&hlua->com);
- if (hlua->wake_time != TICK_ETERNITY)
- task->expire = hlua->wake_time;
+ task->expire = hlua->wake_time;
break;
/* finished with error. */

net/haproxy/patches/0011-DOC-MINOR-clean-up-LUA-documentation-re-servers-array-table.patch (+0 -252)

@@ -1,252 +0,0 @@
commit 016feef5483397491af3242162934d9e9dbc6263
Author: Patrick Hemmer <haproxy@stormcloud9.net>
Date: Tue May 1 21:30:41 2018 -0400
DOC/MINOR: clean up LUA documentation re: servers & array/table.
* A few typos
* Fix definitions of values which are tables, not arrays.
* Consistent US English naming for "server" instead of "serveur".
[tfo: should be backported to 1.6 and higher]
(cherry picked from commit c6a1d711a4d47d68611aa28adecdadba96221bde)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/doc/lua-api/index.rst b/doc/lua-api/index.rst
index e7aa425d..2d210945 100644
--- a/doc/lua-api/index.rst
+++ b/doc/lua-api/index.rst
@@ -169,9 +169,9 @@ Core class
**context**: task, action, sample-fetch, converter
- This attribute is an array of declared proxies (frontend and backends). Each
- proxy give an access to his list of listeners and servers. Each entry is of
- type :ref:`proxy_class`
+ This attribute is a table of declared proxies (frontend and backends). Each
+ proxy give an access to his list of listeners and servers. The table is
+ indexed by proxy name, and each entry is of type :ref:`proxy_class`.
Warning, if you are declared frontend and backend with the same name, only one
of these are listed.
@@ -183,12 +183,9 @@ Core class
**context**: task, action, sample-fetch, converter
- This attribute is an array of declared proxies with backend capability. Each
- proxy give an access to his list of listeners and servers. Each entry is of
- type :ref:`proxy_class`
-
- Warning, if you are declared frontend and backend with the same name, only one
- of these are listed.
+ This attribute is a table of declared proxies with backend capability. Each
+ proxy give an access to his list of listeners and servers. The table is
+ indexed by the backend name, and each entry is of type :ref:`proxy_class`.
:see: :js:attr:`core.proxies`
:see: :js:attr:`core.frontends`
@@ -197,12 +194,9 @@ Core class
**context**: task, action, sample-fetch, converter
- This attribute is an array of declared proxies with frontend capability. Each
- proxy give an access to his list of listeners and servers. Each entry is of
- type :ref:`proxy_class`
-
- Warning, if you are declared frontend and backend with the same name, only one
- of these are listed.
+ This attribute is a table of declared proxies with frontend capability. Each
+ proxy give an access to his list of listeners and servers. The table is
+ indexed by the frontend name, and each entry is of type :ref:`proxy_class`.
:see: :js:attr:`core.proxies`
:see: :js:attr:`core.backends`
@@ -336,7 +330,7 @@ Core class
Lua execution or resume, so two consecutive call to the function "now" will
probably returns the same result.
- :returns: an array which contains two entries "sec" and "usec". "sec"
+ :returns: a table which contains two entries "sec" and "usec". "sec"
contains the current at the epoch format, and "usec" contains the
current microseconds.
@@ -439,9 +433,12 @@ Core class
**context**: body, init, task, action, sample-fetch, converter
- proxies is an array containing the list of all proxies declared in the
- configuration file. Each entry of the proxies array is an object of type
- :ref:`proxy_class`
+ proxies is a table containing the list of all proxies declared in the
+ configuration file. The table is indexed by the proxy name, and each entry
+ of the proxies table is an object of type :ref:`proxy_class`.
+
+ Warning, if you have declared a frontend and backend with the same name, only
+ one of these are listed.
.. js:function:: core.register_action(name, actions, func [, nb_args])
@@ -852,13 +849,14 @@ Proxy class
.. js:attribute:: Proxy.servers
- Contain an array with the attached servers. Each server entry is an object of
- type :ref:`server_class`.
+ Contain a table with the attached servers. The table is indexed by server
+ name, and each server entry is an object of type :ref:`server_class`.
.. js:attribute:: Proxy.listeners
- Contain an array with the attached listeners. Each listeners entry is an
- object of type :ref:`listener_class`.
+ Contain a table with the attached listeners. The table is indexed by listener
+ name, and each each listeners entry is an object of type
+ :ref:`listener_class`.
.. js:function:: Proxy.pause(px)
@@ -908,21 +906,25 @@ Proxy class
.. js:function:: Proxy.get_stats(px)
- Returns an array containg the proxy statistics. The statistics returned are
+ Returns a table containg the proxy statistics. The statistics returned are
not the same if the proxy is frontend or a backend.
:param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
proxy.
- :returns: a key/value array containing stats
+ :returns: a key/value table containing stats
.. _server_class:
Server class
============
+.. js:class:: Server
+
+ This class provides a way for manipulating servers and retrieving information.
+
.. js:function:: Server.is_draining(sv)
- Return true if the server is currently draining stiky connections.
+ Return true if the server is currently draining sticky connections.
:param class_server sv: A :ref:`server_class` which indicates the manipulated
server.
@@ -930,7 +932,7 @@ Server class
.. js:function:: Server.set_weight(sv, weight)
- Dynamically change the weight of the serveur. See the management socket
+ Dynamically change the weight of the server. See the management socket
documentation for more information about the format of the string.
:param class_server sv: A :ref:`server_class` which indicates the manipulated
@@ -939,7 +941,7 @@ Server class
.. js:function:: Server.get_weight(sv)
- This function returns an integer representing the serveur weight.
+ This function returns an integer representing the server weight.
:param class_server sv: A :ref:`server_class` which indicates the manipulated
server.
@@ -947,16 +949,16 @@ Server class
.. js:function:: Server.set_addr(sv, addr)
- Dynamically change the address of the serveur. See the management socket
+ Dynamically change the address of the server. See the management socket
documentation for more information about the format of the string.
:param class_server sv: A :ref:`server_class` which indicates the manipulated
server.
- :param string weight: A string describing the server address.
+ :param string addr: A string describing the server address.
.. js:function:: Server.get_addr(sv)
- Returns a string describing the address of the serveur.
+ Returns a string describing the address of the server.
:param class_server sv: A :ref:`server_class` which indicates the manipulated
server.
@@ -968,7 +970,7 @@ Server class
:param class_server sv: A :ref:`server_class` which indicates the manipulated
server.
- :returns: a key/value array containing stats
+ :returns: a key/value table containing stats
.. js:function:: Server.shut_sess(sv)
@@ -1085,7 +1087,7 @@ Listener class
:param class_listener ls: A :ref:`listener_class` which indicates the
manipulated listener.
- :returns: a key/value array containing stats
+ :returns: a key/value table containing stats
.. _concat_class:
@@ -1169,7 +1171,7 @@ Fetches class
usage. they are the chapters 7.3.2 to 7.3.6.
**warning** some sample fetches are not available in some context. These
- limitations are specified in this documentation when theire useful.
+ limitations are specified in this documentation when they're useful.
:see: :js:attr:`TXN.f`
:see: :js:attr:`TXN.sf`
@@ -1345,13 +1347,13 @@ HTTP class
.. js:function:: HTTP.req_get_headers(http)
- Returns an array containing all the request headers.
+ Returns a table containing all the request headers.
:param class_http http: The related http object.
- :returns: array of headers.
+ :returns: table of headers.
:see: :js:func:`HTTP.res_get_headers`
- This is the form of the returned array:
+ This is the form of the returned table:
.. code-block:: lua
@@ -1366,13 +1368,13 @@ HTTP class
.. js:function:: HTTP.res_get_headers(http)
- Returns an array containing all the response headers.
+ Returns a table containing all the response headers.
:param class_http http: The related http object.
- :returns: array of headers.
+ :returns: table of headers.
:see: :js:func:`HTTP.req_get_headers`
- This is the form of the returned array:
+ This is the form of the returned table:
.. code-block:: lua
@@ -2210,12 +2212,12 @@ AppletHTTP class
.. js:attribute:: AppletHTTP.headers
- :returns: array
+ :returns: table
- The attribute headers returns an array containing the HTTP
+ The attribute headers returns a table containing the HTTP
headers. The header names are always in lower case. As the header name can be
encountered more than once in each request, the value is indexed with 0 as
- first index value. The array have this form:
+ first index value. The table have this form:
.. code-block:: lua

net/haproxy/patches/0012-BUG-MINOR-map-correctly-track-reference-to-the-last-ref_elt-being-dumped.patch (+0 -31)

@@ -1,31 +0,0 @@
commit b2219ae216a141acdf0e2a3f67d2c85aee2a2bc2
Author: Dragan Dosen <ddosen@haproxy.com>
Date: Fri May 4 16:27:15 2018 +0200
BUG/MINOR: map: correctly track reference to the last ref_elt being dumped
The bug was introduced in the commit 8d85aa4 ("BUG/MAJOR: map: fix
segfault during 'show map/acl' on cli").
This patch should be backported to 1.8, 1.7 and 1.6.
(cherry picked from commit 336a11f75571ad46f74a7c6247c13ed44f95da93)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/map.c b/src/map.c
index f40e4394..a9a1e53c 100644
--- a/src/map.c
+++ b/src/map.c
@@ -307,9 +307,9 @@ static int cli_io_handler_pat_list(struct appctx *appctx)
* reference to the last ref_elt being dumped.
*/
if (appctx->st2 == STAT_ST_LIST) {
- if (!LIST_ISEMPTY(&appctx->ctx.sess.bref.users)) {
- LIST_DEL(&appctx->ctx.sess.bref.users);
- LIST_INIT(&appctx->ctx.sess.bref.users);
+ if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users)) {
+ LIST_DEL(&appctx->ctx.map.bref.users);
+ LIST_INIT(&appctx->ctx.map.bref.users);
}
}
return 1;

net/haproxy/patches/0013-BUG-MEDIUM-task-Dont-free-a-task-that-is-about-to-be-run.patch (+0 -128)

@@ -1,128 +0,0 @@
commit a0f0db361978154474d76028183647d5991f3b5c
Author: Olivier Houchard <ohouchard@haproxy.com>
Date: Fri May 4 15:46:16 2018 +0200
BUG/MEDIUM: task: Don't free a task that is about to be run.
While running a task, we may try to delete and free a task that is about to
be run, because it's part of the local tasks list, or because rq_next points
to it.
So flag any task that is in the local tasks list to be deleted, instead of
run, by setting t->process to NULL, and re-make rq_next a global,
thread-local variable, that is modified if we attempt to delete that task.
Many thanks to PiBa-NL for reporting this and analysing the problem.
This should be backported to 1.8.
(cherry picked from commit 9b36cb4a414c22e13d344afbbe70684e9f2f1d49)
Signed-off-by: Willy Tarreau <w@1wt.eu>
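
The deferred-free pattern the patch introduces can be sketched on its own (simplified types; curr stands in for the scheduler's thread-local curr_task, and running for the TASK_RUNNING state bit seen in the diff below):

#include <stdlib.h>

struct task_sketch {
    struct task_sketch *(*process)(struct task_sketch *t);
    int running; /* stands in for (state & TASK_RUNNING) */
};

/* only free a task that cannot be queued behind the scheduler's back;
 * otherwise strip the handler so the run loop frees it instead of running it */
static void task_free_sketch(struct task_sketch *t, struct task_sketch *curr)
{
    if (t == curr || !t->running)
        free(t);
    else
        t->process = NULL;
}
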
diff --git a/include/proto/task.h b/include/proto/task.h
index cbc1a907..c1c4c07e 100644
--- a/include/proto/task.h
+++ b/include/proto/task.h
@@ -90,6 +90,8 @@ extern unsigned int nb_tasks_cur;
extern unsigned int niced_tasks; /* number of niced tasks in the run queue */
extern struct pool_head *pool_head_task;
extern struct pool_head *pool_head_notification;
+extern THREAD_LOCAL struct task *curr_task; /* task currently running or NULL */
+extern THREAD_LOCAL struct eb32sc_node *rq_next; /* Next task to be potentially run */
__decl_hathreads(extern HA_SPINLOCK_T rq_lock); /* spin lock related to run queue */
__decl_hathreads(extern HA_SPINLOCK_T wq_lock); /* spin lock related to wait queue */
@@ -177,8 +179,11 @@ static inline struct task *__task_unlink_rq(struct task *t)
static inline struct task *task_unlink_rq(struct task *t)
{
HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
- if (likely(task_in_rq(t)))
+ if (likely(task_in_rq(t))) {
+ if (&t->rq == rq_next)
+ rq_next = eb32sc_next(rq_next, tid_bit);
__task_unlink_rq(t);
+ }
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
return t;
}
@@ -230,7 +235,7 @@ static inline struct task *task_new(unsigned long thread_mask)
* Free a task. Its context must have been freed since it will be lost.
* The task count is decremented.
*/
-static inline void task_free(struct task *t)
+static inline void __task_free(struct task *t)
{
pool_free(pool_head_task, t);
if (unlikely(stopping))
@@ -238,6 +243,18 @@ static inline void task_free(struct task *t)
HA_ATOMIC_SUB(&nb_tasks, 1);
}
+static inline void task_free(struct task *t)
+{
+ /* There's no need to protect t->state with a lock, as the task
+ * has to run on the current thread.
+ */
+ if (t == curr_task || !(t->state & TASK_RUNNING))
+ __task_free(t);
+ else
+ t->process = NULL;
+}
+
+
/* Place <task> into the wait queue, where it may already be. If the expiration
* timer is infinite, do nothing and rely on wake_expired_task to clean up.
*/
diff --git a/src/task.c b/src/task.c
index fd9acf66..3d021bb4 100644
--- a/src/task.c
+++ b/src/task.c
@@ -39,6 +39,7 @@ unsigned int nb_tasks_cur = 0; /* copy of the tasks count */
unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */
THREAD_LOCAL struct task *curr_task = NULL; /* task currently running or NULL */
+THREAD_LOCAL struct eb32sc_node *rq_next = NULL; /* Next task to be potentially run */
__decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) rq_lock); /* spin lock related to run queue */
__decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) wq_lock); /* spin lock related to wait queue */
@@ -186,7 +187,6 @@ void process_runnable_tasks()
struct task *t;
int i;
int max_processed;
- struct eb32sc_node *rq_next;
struct task *local_tasks[16];
int local_tasks_count;
int final_tasks_count;
@@ -227,8 +227,14 @@ void process_runnable_tasks()
*/
if (likely(t->process == process_stream))
t = process_stream(t);
- else
- t = t->process(t);
+ else {
+ if (t->process != NULL)
+ t = t->process(t);
+ else {
+ __task_free(t);
+ t = NULL;
+ }
+ }
curr_task = NULL;
if (likely(t != NULL)) {
@@ -309,8 +315,14 @@ void process_runnable_tasks()
curr_task = t;
if (likely(t->process == process_stream))
t = process_stream(t);
- else
- t = t->process(t);
+ else {
+ if (t->process != NULL)
+ t = t->process(t);
+ else {
+ __task_free(t);
+ t = NULL;
+ }
+ }
curr_task = NULL;
if (t)
local_tasks[final_tasks_count++] = t;

net/haproxy/patches/0014-BUG-MINOR-lua-schedule-socket-task-upon-lua-connect.patch (+0 -48)

@@ -1,48 +0,0 @@
commit 52ec3578c3ddc688ae14da3cd3e7e351494603d8
Author: PiBa-NL <PiBa.NL.dev@gmail.com>
Date: Sat May 5 23:51:42 2018 +0200
BUG/MINOR: lua: schedule socket task upon lua connect()
Parameters like server address, port and timeout should be set before the
process_stream task is called, to avoid the stream being 'closed' before it
is initialized properly. This is most clearly visible when running with
tune.lua.forced-yield=1: the error "socket: not yet initialised, you can't
set timeouts." would then appear. So scheduling the task should not be done
when creating the Lua socket, but when connect is called.
The code below, for example, also shows this issue, as the sleep will
yield the Lua code:
local con = core.tcp()
core.sleep(1)
con:settimeout(10)
(cherry picked from commit 706d5ee0c366787536213ccd6dea264d20b76a22)
[wt: must be backported to 1.7 and 1.6 as well with a different patch,
see https://www.mail-archive.com/haproxy@formilux.org/msg29924.html]
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/hlua.c b/src/hlua.c
index 0100e7cf..5cc918c9 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -2415,6 +2415,10 @@ __LJMP static int hlua_socket_connect(struct lua_State *L)
WILL_LJMP(luaL_error(L, "out of memory"));
}
xref_unlock(&socket->xref, peer);
+
+ task_wakeup(s->task, TASK_WOKEN_INIT);
+ /* Return yield waiting for connection. */
+
WILL_LJMP(hlua_yieldk(L, 0, 0, hlua_socket_connect_yield, TICK_ETERNITY, 0));
return 0;
@@ -2566,8 +2570,6 @@ __LJMP static int hlua_socket_new(lua_State *L)
strm->flags |= SF_DIRECT | SF_ASSIGNED | SF_ADDR_SET | SF_BE_ASSIGNED;
strm->target = &socket_tcp.obj_type;
- task_wakeup(strm->task, TASK_WOKEN_INIT);
- /* Return yield waiting for connection. */
return 1;
out_fail_stream:

net/haproxy/patches/0015-BUG-MINOR-lua-ensure-large-proxy-IDs-can-be-represented.patch (+0 -38)

@@ -1,38 +0,0 @@
commit edb4427ab7c070a16cb9a23460f68b3fc3c041bb
Author: Willy Tarreau <w@1wt.eu>
Date: Sun May 6 14:50:09 2018 +0200
BUG/MINOR: lua: ensure large proxy IDs can be represented
In function hlua_fcn_new_proxy() too small a buffer was passed to
snprintf(), causing snprintf() to fail for large proxy or listener
IDs. It is unlikely to meet this case, but let's fix it anyway.
This fix must be backported to all stable branches where it applies.
(cherry picked from commit 29d698040d6bb56b29c036aeba05f0d52d8ce94b)
Signed-off-by: Willy Tarreau <w@1wt.eu>
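
The worst case behind the new size is easy to check (standalone sketch, assuming lid is a plain int as in the diff below): "sock-" is 5 bytes, INT_MIN prints as 11 characters, and the terminating NUL brings the total to 17.

#include <limits.h>
#include <stdio.h>

int main(void)
{
    char buffer[17]; /* 5 + 11 + 1 */
    int n = snprintf(buffer, sizeof(buffer), "sock-%d", INT_MIN);

    printf("%s (%d chars)\n", buffer, n); /* sock--2147483648 (16 chars) */
    return 0;
}
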
diff --git a/src/hlua_fcn.c b/src/hlua_fcn.c
index a8d53d45..1df08f85 100644
--- a/src/hlua_fcn.c
+++ b/src/hlua_fcn.c
@@ -796,7 +796,7 @@ int hlua_fcn_new_proxy(lua_State *L, struct proxy *px)
struct server *srv;
struct listener *lst;
int lid;
- char buffer[10];
+ char buffer[17];
lua_newtable(L);
@@ -836,7 +836,7 @@ int hlua_fcn_new_proxy(lua_State *L, struct proxy *px)
if (lst->name)
lua_pushstring(L, lst->name);
else {
- snprintf(buffer, 10, "sock-%d", lid);
+ snprintf(buffer, sizeof(buffer), "sock-%d", lid);
lid++;
lua_pushstring(L, buffer);
}

net/haproxy/patches/0016-BUG-MEDIUM-http-dont-always-abort-transfers-on-CF_SHUTR.patch (+0 -70)

@@ -1,70 +0,0 @@
commit 1c10e5b1b95142bb3ac385be1e60d8b180b2e99e
Author: Willy Tarreau <w@1wt.eu>
Date: Wed May 16 11:35:05 2018 +0200
BUG/MEDIUM: http: don't always abort transfers on CF_SHUTR
Pawel Karoluk reported on Discourse[1] that HTTP/2 breaks url_param.
Christopher managed to track it down to the HTTP_MSGF_WAIT_CONN flag
which is set there to ensure the connection is validated before sending
the headers, as we may need to rewind the stream and hash again upon
redispatch. What happens is that in the forwarding code we refrain
from forwarding when this flag is set and the connection is not yet
established, and for this we go through the missing_data_or_waiting
path. This exit path was initially designed only to wait for data
from the client, so it rightfully checks whether or not the client
has already closed since in that case it must not wait for more data.
But it also has the side effect of aborting such a transfer if the
client has closed after the request, which is exactly what happens
in H2.
A study on the code reveals that this whole combined check should
be revisited : while it used to be true that waiting had the same
error conditions as missing data, it's not true anymore. Some other
corner cases were identified, such as the risk to report a server
close instead of a client timeout when waiting for the client to
read the last chunk of data if the shutr is already present, or
the risk to fail a redispatch when a client uploads some data and
closes before the connection establishes. The compression seems to
be at risk of rare issues there if a write to a full buffer is not
yet possible but a shutr is already queued.
At the moment these risks are extremely unlikely but they do exist,
and their impact is very minor since it mostly concerns an issue not
being optimally handled, and the fixes risk to cause more serious
issues. Thus this patch only focuses on how the HTTP_MSGF_WAIT_CONN
is handled and leaves the rest untouched.
This patch needs to be backported to 1.8, and could be backported to
earlier versions to properly take care of HTTP/1 requests passing via
url_param which are closed immediately after the headers, though this
is unlikely as this behaviour is only exhibited by scripts.
[1] https://discourse.haproxy.org/t/haproxy-1-8-x-url-param-issue-in-http2/2482/13
(cherry picked from commit ba20dfc50161ba705a746d54ebc1a0a45c46beab)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/proto_http.c b/src/proto_http.c
index 4c18a27c..b384cef1 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -4865,7 +4865,8 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
if (!(s->res.flags & CF_READ_ATTACHED)) {
channel_auto_connect(req);
req->flags |= CF_WAKE_CONNECT;
- goto missing_data_or_waiting;
+ channel_dont_close(req); /* don't fail on early shutr */
+ goto waiting;
}
msg->flags &= ~HTTP_MSGF_WAIT_CONN;
}
@@ -4949,6 +4950,7 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
goto return_bad_req_stats_ok;
}
+ waiting:
/* waiting for the last bits to leave the buffer */
if (req->flags & CF_SHUTW)
goto aborted_xfer;

net/haproxy/patches/0017-BUG-MEDIUM-pollers-Use-a-global-list-for-fd-shared-between-threads.patch (+0 -709)

@@ -1,709 +0,0 @@
commit 954db1d01a3d706d4cacd288f28e8517a635d36e
Author: Olivier Houchard <ohouchard@haproxy.com>
Date: Thu May 17 18:34:02 2018 +0200
BUG/MEDIUM: pollers: Use a global list for fd shared between threads.
With the old model, any fd shared by multiple threads, such as listeners
or DNS sockets, would only be updated on one thread, which could lead
to missed events or spurious wakeups.
To avoid this, add a global list for fds that are shared, and only remove
entries from this list when every thread has updated its poller.
This subtly changes the semantics of updt_fd_polling(), as it now unlocks
the FD_LOCK on exit.
This is similar in spirit to commit 6b96f7289c2f401deef4bdc6e20792360807dde4
(with the bugfix from c55b88ece616afe0b28dc81eb39bad37b5f9c33f) applied,
but had to be rewrote, because of the differences between 1.8 and master.
This should only be applied to 1.8.
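
The global list added here is an intrusive doubly linked list threaded through fdtab[] by fd number rather than by pointer. A condensed sketch of the append operation (field names follow the diff below; locking omitted, and the _sketch types are illustrative):

struct fdlist_sketch { int first, last; };  /* -1 marks an empty end */
struct fdupdate_sketch { int next, prev; }; /* per-fd links into the list */

static void update_list_append(struct fdlist_sketch *l,
                               struct fdupdate_sketch *upd, int fd)
{
    if (l->first == -1) {
        l->first = l->last = fd;
        upd[fd].next = upd[fd].prev = -1;
    } else {
        upd[l->last].next = fd;  /* link after the current tail */
        upd[fd].prev = l->last;
        upd[fd].next = -1;
        l->last = fd;
    }
}

An entry stays on the list until done_update_polling() clears the last thread's bit from update_mask and unlinks it, which is what guarantees every thread sees the update.
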
diff --git a/include/common/hathreads.h b/include/common/hathreads.h
index 325a869a..86db4d5c 100644
--- a/include/common/hathreads.h
+++ b/include/common/hathreads.h
@@ -201,6 +201,8 @@ void thread_exit_sync(void);
int thread_no_sync(void);
int thread_need_sync(void);
+extern unsigned long all_threads_mask;
+
#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
/* WARNING!!! if you update this enum, please also keep lock_label() up to date below */
@@ -209,6 +211,7 @@ enum lock_label {
FDTAB_LOCK,
FDCACHE_LOCK,
FD_LOCK,
+ FD_UPDATE_LOCK,
POLL_LOCK,
TASK_RQ_LOCK,
TASK_WQ_LOCK,
@@ -330,6 +333,7 @@ static inline const char *lock_label(enum lock_label label)
case FDCACHE_LOCK: return "FDCACHE";
case FD_LOCK: return "FD";
case FDTAB_LOCK: return "FDTAB";
+ case FD_UPDATE_LOCK: return "FD_UPDATE";
case POLL_LOCK: return "POLL";
case TASK_RQ_LOCK: return "TASK_RQ";
case TASK_WQ_LOCK: return "TASK_WQ";
diff --git a/include/proto/fd.h b/include/proto/fd.h
index bb91bb2c..b6199ccf 100644
--- a/include/proto/fd.h
+++ b/include/proto/fd.h
@@ -43,6 +43,9 @@ extern THREAD_LOCAL int fd_nbupdt; // number of updates in the list
__decl_hathreads(extern HA_SPINLOCK_T __attribute__((aligned(64))) fdtab_lock); /* global lock to protect fdtab array */
__decl_hathreads(extern HA_RWLOCK_T __attribute__((aligned(64))) fdcache_lock); /* global lock to protect fd_cache array */
__decl_hathreads(extern HA_SPINLOCK_T __attribute__((aligned(64))) poll_lock); /* global lock to protect poll info */
+__decl_hathreads(extern HA_SPINLOCK_T __attribute__((aligned(64))) fd_updt_lock); /* global lock to protect the update list */
+
+extern struct fdlist update_list; // Global update list
/* Deletes an FD from the fdsets, and recomputes the maxfd limit.
* The file descriptor is also closed.
@@ -96,14 +99,70 @@ void fd_process_cached_events();
/* Mark fd <fd> as updated for polling and allocate an entry in the update list
* for this if it was not already there. This can be done at any time.
+ * This function expects the FD lock to be locked, and returns with the
+ * FD lock unlocked.
*/
static inline void updt_fd_polling(const int fd)
{
- if (fdtab[fd].update_mask & tid_bit)
+ if ((fdtab[fd].update_mask & fdtab[fd].thread_mask) ==
+ fdtab[fd].thread_mask) {
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
/* already scheduled for update */
return;
- fdtab[fd].update_mask |= tid_bit;
- fd_updt[fd_nbupdt++] = fd;
+ }
+ if (fdtab[fd].thread_mask == tid_bit) {
+ fdtab[fd].update_mask |= tid_bit;
+ fd_updt[fd_nbupdt++] = fd;
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ } else {
+ /* This is ugly, but we can afford to unlock the FD lock
+ * before we acquire the fd_updt_lock, to prevent a
+ * lock order reversal, because this function is only called
+ * from fd_update_cache(), and all users of fd_update_cache()
+ * used to unlock the fd lock right afterwards anyway.
+ */
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ HA_SPIN_LOCK(FD_UPDATE_LOCK, &fd_updt_lock);
+ /* If update_mask is non-zero, then it's already in the list
+ * so we don't have to add it.
+ */
+ if (fdtab[fd].update_mask == 0) {
+ if (update_list.first == -1) {
+ update_list.first = update_list.last = fd;
+ fdtab[fd].update.next = fdtab[fd].update.prev = -1;
+ } else {
+ fdtab[update_list.last].update.next = fd;
+ fdtab[fd].update.prev = update_list.last;
+ fdtab[fd].update.next = -1;
+ update_list.last = fd;
+ }
+ }
+ fdtab[fd].update_mask |= fdtab[fd].thread_mask;
+ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock);
+
+ }
+}
+
+/* Called from the poller to acknowledge that we read an entry from the
+ * global update list, to remove our bit from the update_mask, and to
+ * remove the entry from the list if we were the last one.
+ */
+/* Expects to be called with the FD lock and the FD update lock held */
+static inline void done_update_polling(int fd)
+{
+ fdtab[fd].update_mask &= ~tid_bit;
+ if ((fdtab[fd].update_mask & all_threads_mask) == 0) {
+ if (fdtab[fd].update.prev != -1)
+ fdtab[fdtab[fd].update.prev].update.next =
+ fdtab[fd].update.next;
+ else
+ update_list.first = fdtab[fd].update.next;
+ if (fdtab[fd].update.next != -1)
+ fdtab[fdtab[fd].update.next].update.prev =
+ fdtab[fd].update.prev;
+ else
+ update_list.last = fdtab[fd].update.prev;
+ }
}
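The "ugly" unlock in updt_fd_polling() above exists to preserve a single global lock order: pollers take the update-list lock first and a per-fd lock second, so a writer must never acquire the update-list lock while still holding an fd lock. A hedged pthread sketch of that discipline (assumed names, plain mutexes rather than HAProxy's HA_SPIN_* macros):

    #include <pthread.h>

    static pthread_mutex_t fd_lock     = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

    /* poller side: update-list lock, then fd lock -- the canonical order */
    static void poller_scan_one(void)
    {
        pthread_mutex_lock(&update_lock);
        pthread_mutex_lock(&fd_lock);
        /* ... acknowledge the queued update ... */
        pthread_mutex_unlock(&fd_lock);
        pthread_mutex_unlock(&update_lock);
    }

    /* writer side: release the fd lock *before* touching the update list */
    static void queue_update(void)
    {
        pthread_mutex_lock(&fd_lock);
        /* ... decide that the fd needs a global update ... */
        pthread_mutex_unlock(&fd_lock);  /* drop first: no order reversal */
        pthread_mutex_lock(&update_lock);
        /* ... enqueue the fd ... */
        pthread_mutex_unlock(&update_lock);
    }

    int main(void) { queue_update(); poller_scan_one(); return 0; }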
@@ -175,13 +234,6 @@ static inline int fd_compute_new_polled_status(int state)
*/
static inline void fd_update_cache(int fd)
{
- /* 3 states for each direction require a polling update */
- if ((fdtab[fd].state & (FD_EV_POLLED_R | FD_EV_ACTIVE_R)) == FD_EV_POLLED_R ||
- (fdtab[fd].state & (FD_EV_POLLED_R | FD_EV_READY_R | FD_EV_ACTIVE_R)) == FD_EV_ACTIVE_R ||
- (fdtab[fd].state & (FD_EV_POLLED_W | FD_EV_ACTIVE_W)) == FD_EV_POLLED_W ||
- (fdtab[fd].state & (FD_EV_POLLED_W | FD_EV_READY_W | FD_EV_ACTIVE_W)) == FD_EV_ACTIVE_W)
- updt_fd_polling(fd);
-
/* only READY and ACTIVE states (the two with both flags set) require a cache entry */
if (((fdtab[fd].state & (FD_EV_READY_R | FD_EV_ACTIVE_R)) == (FD_EV_READY_R | FD_EV_ACTIVE_R)) ||
((fdtab[fd].state & (FD_EV_READY_W | FD_EV_ACTIVE_W)) == (FD_EV_READY_W | FD_EV_ACTIVE_W))) {
@@ -190,6 +242,14 @@ static inline void fd_update_cache(int fd)
else {
fd_release_cache_entry(fd);
}
+ /* 3 states for each direction require a polling update */
+ if ((fdtab[fd].state & (FD_EV_POLLED_R | FD_EV_ACTIVE_R)) == FD_EV_POLLED_R ||
+ (fdtab[fd].state & (FD_EV_POLLED_R | FD_EV_READY_R | FD_EV_ACTIVE_R)) == FD_EV_ACTIVE_R ||
+ (fdtab[fd].state & (FD_EV_POLLED_W | FD_EV_ACTIVE_W)) == FD_EV_POLLED_W ||
+ (fdtab[fd].state & (FD_EV_POLLED_W | FD_EV_READY_W | FD_EV_ACTIVE_W)) == FD_EV_ACTIVE_W)
+ updt_fd_polling(fd);
+ else
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/*
@@ -271,8 +331,9 @@ static inline void fd_stop_recv(int fd)
if (fd_recv_active(fd)) {
fdtab[fd].state &= ~FD_EV_ACTIVE_R;
fd_update_cache(fd); /* need an update entry to change the state */
- }
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ /* the FD lock is unlocked by fd_update_cache() */
+ } else
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Disable processing send events on fd <fd> */
@@ -282,8 +343,9 @@ static inline void fd_stop_send(int fd)
if (fd_send_active(fd)) {
fdtab[fd].state &= ~FD_EV_ACTIVE_W;
fd_update_cache(fd); /* need an update entry to change the state */
- }
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ /* the FD lock is unlocked by fd_update_cache() */
+ } else
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Disable processing of events on fd <fd> for both directions. */
@@ -293,8 +355,9 @@ static inline void fd_stop_both(int fd)
if (fd_active(fd)) {
fdtab[fd].state &= ~FD_EV_ACTIVE_RW;
fd_update_cache(fd); /* need an update entry to change the state */
- }
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ /* the FD lock is unlocked by fd_update_cache() */
+ } else
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Report that FD <fd> cannot receive anymore without polling (EAGAIN detected). */
@@ -304,8 +367,9 @@ static inline void fd_cant_recv(const int fd)
if (fd_recv_ready(fd)) {
fdtab[fd].state &= ~FD_EV_READY_R;
fd_update_cache(fd); /* need an update entry to change the state */
- }
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ /* the FD lock is unlocked by fd_update_cache() */
+ } else
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Report that FD <fd> can receive again without polling. */
@@ -315,8 +379,9 @@ static inline void fd_may_recv(const int fd)
if (!fd_recv_ready(fd)) {
fdtab[fd].state |= FD_EV_READY_R;
fd_update_cache(fd); /* need an update entry to change the state */
- }
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ /* the FD lock is unlocked by fd_update_cache() */
+ } else
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Disable readiness when polled. This is useful to interrupt reading when it
@@ -330,8 +395,9 @@ static inline void fd_done_recv(const int fd)
if (fd_recv_polled(fd) && fd_recv_ready(fd)) {
fdtab[fd].state &= ~FD_EV_READY_R;
fd_update_cache(fd); /* need an update entry to change the state */
- }
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ /* the FD lock is unlocked by fd_update_cache() */
+ } else
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Report that FD <fd> cannot send anymore without polling (EAGAIN detected). */
@@ -341,8 +407,9 @@ static inline void fd_cant_send(const int fd)
if (fd_send_ready(fd)) {
fdtab[fd].state &= ~FD_EV_READY_W;
fd_update_cache(fd); /* need an update entry to change the state */
- }
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ /* the FD lock is unlocked by fd_update_cache() */
+ } else
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Report that FD <fd> can send again without polling. */
@@ -352,8 +419,9 @@ static inline void fd_may_send(const int fd)
if (!fd_send_ready(fd)) {
fdtab[fd].state |= FD_EV_READY_W;
fd_update_cache(fd); /* need an update entry to change the state */
- }
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ /* the FD lock is unlocked by fd_update_cache() */
+ } else
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Prepare FD <fd> to try to receive */
@@ -363,8 +431,9 @@ static inline void fd_want_recv(int fd)
if (!fd_recv_active(fd)) {
fdtab[fd].state |= FD_EV_ACTIVE_R;
fd_update_cache(fd); /* need an update entry to change the state */
- }
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ /* the FD lock is unlocked by fd_update_cache() */
+ } else
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Prepare FD <fd> to try to send */
@@ -374,8 +443,9 @@ static inline void fd_want_send(int fd)
if (!fd_send_active(fd)) {
fdtab[fd].state |= FD_EV_ACTIVE_W;
fd_update_cache(fd); /* need an update entry to change the state */
- }
- HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ /* the FD lock is unlocked by fd_update_cache() */
+ } else
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Update events seen for FD <fd> and its state if needed. This should be called
diff --git a/include/types/fd.h b/include/types/fd.h
index 9f2c5fee..8e34c624 100644
--- a/include/types/fd.h
+++ b/include/types/fd.h
@@ -90,11 +90,24 @@ enum fd_states {
*/
#define DEAD_FD_MAGIC 0xFDDEADFD
+struct fdlist_entry {
+ int next;
+ int prev;
+} __attribute__ ((aligned(8)));
+
+/* head of the fd list */
+struct fdlist {
+ int first;
+ int last;
+} __attribute__ ((aligned(8)));
+
+
/* info about one given fd */
struct fdtab {
__decl_hathreads(HA_SPINLOCK_T lock);
unsigned long thread_mask; /* mask of thread IDs authorized to process the task */
unsigned long polled_mask; /* mask of thread IDs currently polling this fd */
+ struct fdlist_entry update; /* Entry in the global update list */
unsigned long update_mask; /* mask of thread IDs having an update for fd */
void (*iocb)(int fd); /* I/O handler */
void *owner; /* the connection or listener associated with this fd, NULL if closed */
diff --git a/src/ev_epoll.c b/src/ev_epoll.c
index 124b8163..adc15acd 100644
--- a/src/ev_epoll.c
+++ b/src/ev_epoll.c
@@ -59,13 +59,51 @@ REGPRM1 static void __fd_clo(int fd)
}
}
+static void _update_fd(int fd)
+{
+ int en, opcode;
+
+ en = fdtab[fd].state;
+
+ if (fdtab[fd].polled_mask & tid_bit) {
+ if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_POLLED_RW)) {
+ /* fd removed from poll list */
+ opcode = EPOLL_CTL_DEL;
+ HA_ATOMIC_AND(&fdtab[fd].polled_mask, ~tid_bit);
+ }
+ else {
+ /* fd status changed */
+ opcode = EPOLL_CTL_MOD;
+ }
+ }
+ else if ((fdtab[fd].thread_mask & tid_bit) && (en & FD_EV_POLLED_RW)) {
+ /* new fd in the poll list */
+ opcode = EPOLL_CTL_ADD;
+ HA_ATOMIC_OR(&fdtab[fd].polled_mask, tid_bit);
+ }
+ else {
+ return;
+ }
+
+ /* construct the epoll events based on new state */
+ ev.events = 0;
+ if (en & FD_EV_POLLED_R)
+ ev.events |= EPOLLIN | EPOLLRDHUP;
+
+ if (en & FD_EV_POLLED_W)
+ ev.events |= EPOLLOUT;
+
+ ev.data.fd = fd;
+ epoll_ctl(epoll_fd[tid], opcode, fd, &ev);
+}
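For reference, the refactored _update_fd() reduces to choosing an epoll_ctl() opcode from the fd's previous polled state and its desired one. A minimal standalone demonstration of that ADD/MOD/DEL lifecycle (plain Linux epoll on an eventfd, not HAProxy code):

    #include <sys/epoll.h>
    #include <sys/eventfd.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int ep = epoll_create1(0);
        int fd = eventfd(0, 0);
        struct epoll_event ev = { .events = EPOLLIN | EPOLLRDHUP,
                                  .data.fd = fd };

        epoll_ctl(ep, EPOLL_CTL_ADD, fd, &ev);  /* new fd in the poll list */
        ev.events |= EPOLLOUT;
        epoll_ctl(ep, EPOLL_CTL_MOD, fd, &ev);  /* fd status changed */
        epoll_ctl(ep, EPOLL_CTL_DEL, fd, NULL); /* removed from poll list */

        close(fd);
        close(ep);
        printf("ADD/MOD/DEL sequence completed\n");
        return 0;
    }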
+
/*
* Linux epoll() poller
*/
REGPRM2 static void _do_poll(struct poller *p, int exp)
{
int status, eo, en;
- int fd, opcode;
+ int fd;
int count;
int updt_idx;
int wait_time;
@@ -89,39 +127,31 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
en = fd_compute_new_polled_status(eo);
fdtab[fd].state = en;
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
-
- if (fdtab[fd].polled_mask & tid_bit) {
- if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_POLLED_RW)) {
- /* fd removed from poll list */
- opcode = EPOLL_CTL_DEL;
- HA_ATOMIC_AND(&fdtab[fd].polled_mask, ~tid_bit);
- }
- else {
- /* fd status changed */
- opcode = EPOLL_CTL_MOD;
- }
- }
- else if ((fdtab[fd].thread_mask & tid_bit) && (en & FD_EV_POLLED_RW)) {
- /* new fd in the poll list */
- opcode = EPOLL_CTL_ADD;
- HA_ATOMIC_OR(&fdtab[fd].polled_mask, tid_bit);
- }
+ _update_fd(fd);
+ }
+ fd_nbupdt = 0;
+ /* Scan the global update list */
+ HA_SPIN_LOCK(FD_UPDATE_LOCK, &fd_updt_lock);
+ for (fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (fdtab[fd].update_mask & tid_bit)
+ done_update_polling(fd);
else {
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
continue;
}
+ fdtab[fd].new = 0;
- /* construct the epoll events based on new state */
- ev.events = 0;
- if (en & FD_EV_POLLED_R)
- ev.events |= EPOLLIN | EPOLLRDHUP;
-
- if (en & FD_EV_POLLED_W)
- ev.events |= EPOLLOUT;
+ eo = fdtab[fd].state;
+ en = fd_compute_new_polled_status(eo);
+ fdtab[fd].state = en;
- ev.data.fd = fd;
- epoll_ctl(epoll_fd[tid], opcode, fd, &ev);
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (!fdtab[fd].owner)
+ continue;
+ _update_fd(fd);
}
- fd_nbupdt = 0;
+ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock);
/* compute the epoll_wait() timeout */
if (!exp)
@@ -208,8 +238,10 @@ static int init_epoll_per_thread()
* fd for this thread. Let's just mark them as updated, the poller will
* do the rest.
*/
- for (fd = 0; fd < maxfd; fd++)
+ for (fd = 0; fd < maxfd; fd++) {
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
updt_fd_polling(fd);
+ }
return 1;
fail_fd:
diff --git a/src/ev_kqueue.c b/src/ev_kqueue.c
index 8cd6dd84..642de8b3 100644
--- a/src/ev_kqueue.c
+++ b/src/ev_kqueue.c
@@ -33,6 +33,41 @@ static int kqueue_fd[MAX_THREADS]; // per-thread kqueue_fd
static THREAD_LOCAL struct kevent *kev = NULL;
static struct kevent *kev_out = NULL; // Trash buffer for kevent() to write the eventlist in
+static int _update_fd(int fd, int start)
+{
+ int en;
+ int changes = start;
+
+ en = fdtab[fd].state;
+
+ if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_POLLED_RW)) {
+ if (!(fdtab[fd].polled_mask & tid_bit)) {
+ /* fd was not watched, it's still not */
+ return 0;
+ }
+ /* fd totally removed from poll list */
+ EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
+ EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
+ HA_ATOMIC_AND(&fdtab[fd].polled_mask, ~tid_bit);
+ }
+ else {
+ /* OK fd has to be monitored, it was either added or changed */
+
+ if (en & FD_EV_POLLED_R)
+ EV_SET(&kev[changes++], fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
+ else if (fdtab[fd].polled_mask & tid_bit)
+ EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
+
+ if (en & FD_EV_POLLED_W)
+ EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL);
+ else if (fdtab[fd].polled_mask & tid_bit)
+ EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
+
+ HA_ATOMIC_OR(&fdtab[fd].polled_mask, tid_bit);
+ }
+ return changes;
+}
+
/*
* kqueue() poller
*/
@@ -66,32 +101,32 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
fdtab[fd].state = en;
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
- if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_POLLED_RW)) {
- if (!(fdtab[fd].polled_mask & tid_bit)) {
- /* fd was not watched, it's still not */
- continue;
- }
- /* fd totally removed from poll list */
- EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
- EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
- HA_ATOMIC_AND(&fdtab[fd].polled_mask, ~tid_bit);
- }
- else {
- /* OK fd has to be monitored, it was either added or changed */
+ changes = _update_fd(fd, changes);
+ }
- if (en & FD_EV_POLLED_R)
- EV_SET(&kev[changes++], fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
- else if (fdtab[fd].polled_mask & tid_bit)
- EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
+ /* Scan the global update list */
+ HA_SPIN_LOCK(FD_UPDATE_LOCK, &fd_updt_lock);
+ for (fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (fdtab[fd].update_mask & tid_bit)
+ done_update_polling(fd);
+ else {
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ continue;
+ }
+ fdtab[fd].new = 0;
- if (en & FD_EV_POLLED_W)
- EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL);
- else if (fdtab[fd].polled_mask & tid_bit)
- EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
+ eo = fdtab[fd].state;
+ en = fd_compute_new_polled_status(eo);
+ fdtab[fd].state = en;
- HA_ATOMIC_OR(&fdtab[fd].polled_mask, tid_bit);
- }
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if (!fdtab[fd].owner)
+ continue;
+ changes = _update_fd(fd, changes);
}
+ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock);
+
if (changes) {
#ifdef EV_RECEIPT
kev[0].flags |= EV_RECEIPT;
@@ -189,8 +224,10 @@ static int init_kqueue_per_thread()
* fd for this thread. Let's just mark them as updated, the poller will
* do the rest.
*/
- for (fd = 0; fd < maxfd; fd++)
+ for (fd = 0; fd < maxfd; fd++) {
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
updt_fd_polling(fd);
+ }
return 1;
fail_fd:
diff --git a/src/ev_poll.c b/src/ev_poll.c
index b7cc0bb3..c913ced2 100644
--- a/src/ev_poll.c
+++ b/src/ev_poll.c
@@ -104,6 +104,51 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
}
}
+ HA_SPIN_LOCK(FD_UPDATE_LOCK, &fd_updt_lock);
+ for (fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (fdtab[fd].update_mask & tid_bit) {
+ /* Cheat a bit, as the state is global to all pollers
+ * we don't need every thread to take care of the
+ * update.
+ */
+ fdtab[fd].update_mask &= ~all_threads_mask;
+ done_update_polling(fd);
+ } else {
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ continue;
+ }
+
+ if (!fdtab[fd].owner) {
+ activity[tid].poll_drop++;
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ continue;
+ }
+
+ fdtab[fd].new = 0;
+
+ eo = fdtab[fd].state;
+ en = fd_compute_new_polled_status(eo);
+ fdtab[fd].state = en;
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+
+ if ((eo ^ en) & FD_EV_POLLED_RW) {
+ /* poll status changed, update the lists */
+ HA_SPIN_LOCK(POLL_LOCK, &poll_lock);
+ if ((eo & ~en) & FD_EV_POLLED_R)
+ hap_fd_clr(fd, fd_evts[DIR_RD]);
+ else if ((en & ~eo) & FD_EV_POLLED_R)
+ hap_fd_set(fd, fd_evts[DIR_RD]);
+
+ if ((eo & ~en) & FD_EV_POLLED_W)
+ hap_fd_clr(fd, fd_evts[DIR_WR]);
+ else if ((en & ~eo) & FD_EV_POLLED_W)
+ hap_fd_set(fd, fd_evts[DIR_WR]);
+ HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
+ }
+
+ }
+ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock);
fd_nbupdt = 0;
nbfd = 0;
diff --git a/src/ev_select.c b/src/ev_select.c
index 5f3486ed..bde923ea 100644
--- a/src/ev_select.c
+++ b/src/ev_select.c
@@ -70,7 +70,42 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
en = fd_compute_new_polled_status(eo);
fdtab[fd].state = en;
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ if ((eo ^ en) & FD_EV_POLLED_RW) {
+ /* poll status changed, update the lists */
+ HA_SPIN_LOCK(POLL_LOCK, &poll_lock);
+ if ((eo & ~en) & FD_EV_POLLED_R)
+ FD_CLR(fd, fd_evts[DIR_RD]);
+ else if ((en & ~eo) & FD_EV_POLLED_R)
+ FD_SET(fd, fd_evts[DIR_RD]);
+
+ if ((eo & ~en) & FD_EV_POLLED_W)
+ FD_CLR(fd, fd_evts[DIR_WR]);
+ else if ((en & ~eo) & FD_EV_POLLED_W)
+ FD_SET(fd, fd_evts[DIR_WR]);
+ HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
+ }
+ }
+ HA_SPIN_LOCK(FD_UPDATE_LOCK, &fd_updt_lock);
+ for (fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
+ HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+ if (fdtab[fd].update_mask & tid_bit) {
+ /* Cheat a bit, as the state is global to all pollers
+ * we don't need every thread to take care of the
+ * update.
+ */
+ fdtab[fd].update_mask &= ~all_threads_mask;
+ done_update_polling(fd);
+ } else {
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+ continue;
+ }
+ fdtab[fd].new = 0;
+
+ eo = fdtab[fd].state;
+ en = fd_compute_new_polled_status(eo);
+ fdtab[fd].state = en;
+ HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if ((eo ^ en) & FD_EV_POLLED_RW) {
/* poll status changed, update the lists */
HA_SPIN_LOCK(POLL_LOCK, &poll_lock);
@@ -85,7 +120,9 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
FD_SET(fd, fd_evts[DIR_WR]);
HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
}
+
}
+ HA_SPIN_UNLOCK(FD_UPDATE_LOCK, &fd_updt_lock);
fd_nbupdt = 0;
/* let's restore fdset state */
diff --git a/src/fd.c b/src/fd.c
index b64130ed..a134e93e 100644
--- a/src/fd.c
+++ b/src/fd.c
@@ -175,9 +175,12 @@ unsigned long fd_cache_mask = 0; // Mask of threads with events in the cache
THREAD_LOCAL int *fd_updt = NULL; // FD updates list
THREAD_LOCAL int fd_nbupdt = 0; // number of updates in the list
+struct fdlist update_list; // Global update list
__decl_hathreads(HA_SPINLOCK_T fdtab_lock); /* global lock to protect fdtab array */
__decl_hathreads(HA_RWLOCK_T fdcache_lock); /* global lock to protect fd_cache array */
__decl_hathreads(HA_SPINLOCK_T poll_lock); /* global lock to protect poll info */
+__decl_hathreads(HA_SPINLOCK_T) fd_updt_lock; /* global lock to protect the update list */
+
/* Deletes an FD from the fdsets, and recomputes the maxfd limit.
* The file descriptor is also closed.
@@ -341,6 +344,9 @@ int init_pollers()
HA_SPIN_INIT(&fdtab_lock);
HA_RWLOCK_INIT(&fdcache_lock);
HA_SPIN_INIT(&poll_lock);
+ HA_SPIN_INIT(&fd_updt_lock);
+ update_list.first = update_list.last = -1;
+
do {
bp = NULL;
for (p = 0; p < nbpollers; p++)
diff --git a/src/hathreads.c b/src/hathreads.c
index 944a0d5b..66ed482a 100644
--- a/src/hathreads.c
+++ b/src/hathreads.c
@@ -31,7 +31,7 @@ void thread_sync_io_handler(int fd)
static HA_SPINLOCK_T sync_lock;
static int threads_sync_pipe[2];
static unsigned long threads_want_sync = 0;
-static unsigned long all_threads_mask = 0;
+unsigned long all_threads_mask = 0;
#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
struct lock_stat lock_stats[LOCK_LABELS];

+ 0
- 45
net/haproxy/patches/0018-BUG-MEDIUM-ssl-properly-protect-SSL-cert-generation.patch View File

@ -1,45 +0,0 @@
commit f571613244e4c02ca7aada30c89a6244d09d58d4
Author: Willy Tarreau <w@1wt.eu>
Date: Thu May 17 10:56:47 2018 +0200
BUG/MEDIUM: ssl: properly protect SSL cert generation
Commit 821bb9b ("MAJOR: threads/ssl: Make SSL part thread-safe") added
insufficient locking to the cert lookup and generation code: it uses
lru64_lookup(), which automatically removes and re-adds a list element
to the LRU list, so it cannot simply be read-locked.
A long-term improvement would be to use a lockless mechanism
in lru64_lookup() to safely move the list element to the head. For now
let's simply use a write lock during the lookup. The effect will be
minimal since it's used only in conjunction with automatically generated
certificates, which are much more expensive and rarely used.
This fix must be backported to 1.8.
(cherry picked from commit 03f4ec47d9ffff629b07dcba9f0f134a7c7e44b2)
Signed-off-by: William Lallemand <wlallemand@haproxy.org>
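The heart of this bug is that an LRU lookup is a write in disguise: finding an entry promotes it to the head of the list, mutating the links. A small self-contained sketch (assumed names, not the actual lru64 API) of why such a lookup cannot run under a shared read lock:

    #include <pthread.h>
    #include <stdio.h>

    struct node { int key; struct node *next; };

    static struct node b = { 2, NULL }, a = { 1, &b };
    static struct node *head = &a;
    static pthread_rwlock_t lru_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Moves the found node to the front: a mutation, despite the name. */
    static struct node *lru_lookup(int key)
    {
        struct node *prev = NULL, *cur;

        for (cur = head; cur; prev = cur, cur = cur->next) {
            if (cur->key != key)
                continue;
            if (prev) {              /* unlink, then re-link at the head */
                prev->next = cur->next;
                cur->next = head;
                head = cur;
            }
            return cur;
        }
        return NULL;
    }

    int main(void)
    {
        pthread_rwlock_wrlock(&lru_lock); /* write lock: the lookup mutates */
        struct node *n = lru_lookup(2);
        pthread_rwlock_unlock(&lru_lock);
        printf("found %d, new head %d\n", n->key, head->key);
        return 0;
    }

Two threads doing the same lookup under a read lock would race on prev->next and head, which is exactly the window the patch closes by taking the write lock.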
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index 1196d111..9fb2bb15 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -1812,15 +1812,15 @@ ssl_sock_assign_generated_cert(unsigned int key, struct bind_conf *bind_conf, SS
struct lru64 *lru = NULL;
if (ssl_ctx_lru_tree) {
- HA_RWLOCK_RDLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+ HA_RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
lru = lru64_lookup(key, ssl_ctx_lru_tree, bind_conf->ca_sign_cert, 0);
if (lru && lru->domain) {
if (ssl)
SSL_set_SSL_CTX(ssl, (SSL_CTX *)lru->data);
- HA_RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+ HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
return (SSL_CTX *)lru->data;
}
- HA_RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+ HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
}
return NULL;
}
