
Merge pull request #11223 from gladiac1337/haproxy-2.1.2

haproxy: Update HAProxy to v2.1.2
Rosen Penev, 5 years ago (committed by GitHub)
commit f79a58326a
58 changed files with 3486 additions and 42 deletions
  1. net/haproxy/Makefile (+6, -38)
  2. net/haproxy/get-latest-patches.sh (+2, -2)
  3. net/haproxy/patches/000-BUG-MINOR-checks-refine-which-errno-values-are-really-errors.patch (+101, -0)
  4. net/haproxy/patches/001-BUG-MEDIUM-checks-Only-attempt-to-do-handshakes-if-the-connection-is-ready.patch (+46, -0)
  5. net/haproxy/patches/002-BUG-MEDIUM-connections-Hold-the-lock-when-wanting-to-kill-a-connection.patch (+37, -0)
  6. net/haproxy/patches/003-MINOR-config-disable-busy-polling-on-old-processes.patch (+47, -0)
  7. net/haproxy/patches/004-MINOR-ssl-Remove-unused-variable-need_out.patch (+39, -0)
  8. net/haproxy/patches/005-BUG-MINOR-h1-Report-the-right-error-position-when-a-header-value-is-invalid.patch (+55, -0)
  9. net/haproxy/patches/006-BUG-MINOR-proxy-Fix-input-data-copy-when-an-error-is-captured.patch (+32, -0)
  10. net/haproxy/patches/007-BUG-MEDIUM-http-ana-Truncate-the-response-when-a-redirect-rule-is-applied.patch (+40, -0)
  11. net/haproxy/patches/008-BUG-MINOR-channel-inject-output-data-at-the-end-of-output.patch (+34, -0)
  12. net/haproxy/patches/009-BUG-MEDIUM-session-do-not-report-a-failure-when-rejecting-a-session.patch (+39, -0)
  13. net/haproxy/patches/010-BUG-MINOR-stream-int-Dont-trigger-L7-retry-if-max-retries-is-already-reached.patch (+37, -0)
  14. net/haproxy/patches/011-BUG-MEDIUM-tasks-Use-the-MT-macros-in-tasklet_free.patch (+30, -0)
  15. net/haproxy/patches/012-BUG-MINOR-mux-h2-use-a-safe-list_for_each_entry-in-h2_send.patch (+153, -0)
  16. net/haproxy/patches/013-BUG-MEDIUM-mux-h2-fix-missing-test-on-sending_list-in-previous-patch.patch (+32, -0)
  17. net/haproxy/patches/014-BUG-MEDIUM-mux-h2-dont-stop-sending-when-crossing-a-buffer-boundary.patch (+79, -0)
  18. net/haproxy/patches/015-BUG-MINOR-cli-mworker-cant-start-haproxy-with-2-programs.patch (+37, -0)
  19. net/haproxy/patches/016-REGTEST-mcli-mcli_start_progs-start-2-programs.patch (+60, -0)
  20. net/haproxy/patches/017-BUG-MEDIUM-mworker-remain-in-mworker-mode-during-reload.patch (+37, -0)
  21. net/haproxy/patches/018-BUG-MEDIUM-mux_h1-Dont-call-h1_send-if-we-subscribed.patch (+31, -0)
  22. net/haproxy/patches/019-BUG-MAJOR-hashes-fix-the-signedness-of-the-hash-inputs.patch (+141, -0)
  23. net/haproxy/patches/020-REGTEST-add-sample_fetches-hashes-vtc-to-validate-hashes.patch (+127, -0)
  24. net/haproxy/patches/021-BUG-MEDIUM-cli-_getsocks-must-send-the-peers-sockets.patch (+156, -0)
  25. net/haproxy/patches/022-BUG-MINOR-stream-dont-mistake-match-rules-for-store-request-rules.patch (+70, -0)
  26. net/haproxy/patches/023-BUG-MEDIUM-connection-add-a-mux-flag-to-indicate-splice-usability.patch (+145, -0)
  27. net/haproxy/patches/024-BUG-MINOR-pattern-handle-errors-from-fgets-when-trying-to-load-patterns.patch (+44, -0)
  28. net/haproxy/patches/025-BUG-MINOR-cache-Fix-leak-of-cache-name-in-error-path.patch (+26, -0)
  29. net/haproxy/patches/026-BUG-MINOR-dns-Make-dns_query_id_seed-unsigned.patch (+32, -0)
  30. net/haproxy/patches/027-BUG-MINOR-51d-Fix-bug-when-HTX-is-enabled.patch (+92, -0)
  31. net/haproxy/patches/028-BUILD-pattern-include-errno-h.patch (+28, -0)
  32. net/haproxy/patches/029-BUG-MINOR-http-ana-filters-Wait-end-of-the-http_end-callback-for-all-filters.patch (+202, -0)
  33. net/haproxy/patches/030-BUG-MINOR-http-rules-Remove-buggy-deinit-functions-for-HTTP-rules.patch (+133, -0)
  34. net/haproxy/patches/031-BUG-MINOR-stick-table-Use-MAX_SESS_STKCTR-as-the-max-track-ID-during-parsing.patch (+56, -0)
  35. net/haproxy/patches/032-BUG-MINOR-tcp-rules-Fix-memory-releases-on-error-path-during-action-parsing.patch (+65, -0)
  36. net/haproxy/patches/033-BUG-MINOR-ssl-ssl_sock_load_ocsp_response_from_file-memory-leak.patch (+32, -0)
  37. net/haproxy/patches/034-BUG-MINOR-ssl-ssl_sock_load_issuer_file_into_ckch-memory-leak.patch (+31, -0)
  38. net/haproxy/patches/035-BUG-MINOR-ssl-ssl_sock_load_sctl_from_file-memory-leak.patch (+36, -0)
  39. net/haproxy/patches/036-MINOR-proxy-http-ana-Add-support-of-extra-attributes-for-the-cookie-directive.patch (+140, -0)
  40. net/haproxy/patches/037-BUG-MINOR-http_act-dont-check-capture-id-in-backend.patch (+89, -0)
  41. net/haproxy/patches/038-BUG-MEDIUM-netscaler-Dont-forget-to-allocate-storage-for-conn--src-dst.patch (+30, -0)
  42. net/haproxy/patches/039-BUG-MINOR-ssl-ssl_sock_load_pem_into_ckch-is-not-consistent.patch (+129, -0)
  43. net/haproxy/patches/040-BUG-MINOR-ssl-cli-free-the-previous-ckch-content-once-a-PEM-is-loaded.patch (+48, -0)
  44. net/haproxy/patches/041-CLEANUP-stats-shut-up-a-wrong-null-deref-warning-from-gcc-9-2.patch (+30, -0)
  45. net/haproxy/patches/042-BUG-MINOR-ssl-increment-issuer-refcount-if-in-chain.patch (+28, -0)
  46. net/haproxy/patches/043-BUG-MINOR-ssl-memory-leak-w-the-ocsp_issuer.patch (+29, -0)
  47. net/haproxy/patches/044-BUG-MINOR-ssl-typo-in-previous-patch.patch (+27, -0)
  48. net/haproxy/patches/045-BUG-MINOR-ssl-cli-ocsp_issuer-must-be-set-w-set-ssl-cert.patch (+67, -0)
  49. net/haproxy/patches/046-BUG-MEDIUM-0rtt-Only-consider-the-SSL-handshake.patch (+46, -0)
  50. net/haproxy/patches/047-BUG-MINOR-stktable-report-the-current-proxy-name-in-error-messages.patch (+45, -0)
  51. net/haproxy/patches/048-BUG-MEDIUM-mux-h2-make-sure-we-dont-emit-TE-headers-with-anything-but-trailers.patch (+69, -0)
  52. net/haproxy/patches/049-BUILD-cfgparse-silence-a-bogus-gcc-warning-on-32-bit-machines.patch (+50, -0)
  53. net/haproxy/patches/050-MINOR-lua-Add-hlua_prepend_path-function.patch (+38, -0)
  54. net/haproxy/patches/051-MINOR-lua-Add-lua-prepend-path-configuration-option.patch (+98, -0)
  55. net/haproxy/patches/052-MINOR-lua-Add-HLUA_PREPEND_C-PATH-build-option.patch (+62, -0)
  56. net/haproxy/patches/053-BUG-MEDIUM-ssl-Dont-forget-to-free-ctx--ssl-on-failure.patch (+36, -0)
  57. net/haproxy/patches/054-BUG-MINOR-tcpchecks-fix-the-connect-flags-regarding-delayed-ack.patch (+33, -0)
  58. net/haproxy/patches/055-OPENWRT-add-uclibc-support.patch (+2, -2)

net/haproxy/Makefile (+6, -38)

@ -10,12 +10,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=haproxy
PKG_VERSION:=2.0.12
PKG_VERSION:=2.1.2
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://www.haproxy.org/download/2.0/src
PKG_HASH:=7fcf5adb21cd78c4161902f9fcc8d7fc97e1562319a992cbda884436ca9602fd
PKG_SOURCE_URL:=https://www.haproxy.org/download/2.1/src
PKG_HASH:=6079b08a8905ade5a9a2835ead8963ee10a855d8508a85efb7181eea2d310b77
PKG_MAINTAINER:=Thomas Heil <heil@terminal-consulting.de>, \
Christian Lachner <gladiac@gmail.com>
@ -33,18 +33,8 @@ define Package/haproxy/Default
URL:=https://www.haproxy.org/
endef
define Download/lua535
FILE:=lua-5.3.5.tar.gz
URL:=https://www.lua.org/ftp/
HASH:=0c2eed3f960446e1a3e4b9a1ca2f3ff893b6ce41942cf54d5dd59ab4b3b058ac
endef
define Build/Prepare
$(call Build/Prepare/Default)
ifeq ($(ENABLE_LUA),y)
tar -zxvf $(DL_DIR)/lua-5.3.5.tar.gz -C $(PKG_BUILD_DIR)
ln -s $(PKG_BUILD_DIR)/lua-5.3.5 $(PKG_BUILD_DIR)/lua
endif
endef
define Package/haproxy/Default/conffiles
@ -56,7 +46,7 @@ define Package/haproxy/Default/description
endef
define Package/haproxy
DEPENDS+= +libpcre +libltdl +zlib +libpthread +libopenssl +libncurses +libreadline +libatomic
DEPENDS+= +libpcre +libltdl +zlib +libpthread +liblua5.3 +libopenssl +libncurses +libreadline +libatomic
TITLE+= (with SSL support)
VARIANT:=ssl
$(call Package/haproxy/Default)
@ -74,7 +64,7 @@ endef
define Package/haproxy-nossl
TITLE+= (without SSL support)
VARIANT:=nossl
DEPENDS+= +libpcre +libltdl +zlib +libpthread +libatomic
DEPENDS+= +libpcre +libltdl +zlib +libpthread +liblua5.3 +libatomic
TITLE+= (without SSL support)
$(call Package/haproxy/Default)
endef
@ -106,37 +96,16 @@ ifeq ($(BUILD_VARIANT),ssl)
ADDON+=ADDLIB="-lcrypto -lm "
endif
ifeq ($(ENABLE_LUA),y)
ADDON+=USE_LUA=1
ADDON+=LUA_LIB_NAME="lua535"
ADDON+=LUA_INC="$(STAGING_DIR)/lua-5.3.5/include"
ADDON+=LUA_LIB="$(STAGING_DIR)/lua-5.3.5/lib"
endif
ifeq ($(ENABLE_REGPARM),y)
ADDON+=USE_REGPARM=1
endif
ifeq ($(ENABLE_LUA),y)
define Build/Compile/lua
$(MAKE) TARGET=$(LINUX_TARGET) -C $(PKG_BUILD_DIR)/lua \
INSTALL_TOP="$(STAGING_DIR)/lua-5.3.5/" \
CC="$(TARGET_CC)" \
CFLAGS="$(TARGET_CFLAGS) $(TARGET_CPPFLAGS)" \
LDFLAGS="$(TARGET_LDFLAGS) -lncurses -lreadline" \
LD="$(TARGET_LD)" \
linux install
mv $(STAGING_DIR)/lua-5.3.5/lib/liblua.a $(STAGING_DIR)/lua-5.3.5/lib/liblua535.a
endef
endif
define Build/Compile
$(call Build/Compile/lua)
$(MAKE) TARGET=$(LINUX_TARGET) -C $(PKG_BUILD_DIR) \
DESTDIR="$(PKG_INSTALL_DIR)" \
CC="$(TARGET_CC)" \
PCREDIR="$(STAGING_DIR)/usr/" \
USE_LUA=1 LUA_LIB_NAME="lua5.3" LUA_INC="$(STAGING_DIR)/usr/include/lua5.3" LUA_LIB="$(STAGING_DIR)/usr/lib" \
SMALL_OPTS="-DBUFSIZE=16384 -DMAXREWRITE=1030 -DSYSTEM_MAXCONN=165530" \
USE_LINUX_TPROXY=1 USE_LINUX_SPLICE=1 USE_TFO=1 USE_NS=1 \
USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_GETADDRINFO=1 \
@ -191,7 +160,6 @@ define Package/halog/install
$(INSTALL_BIN) $(PKG_BUILD_DIR)/contrib/halog/halog $(1)/usr/bin/
endef
$(eval $(call Download,lua535))
$(eval $(call BuildPackage,haproxy))
$(eval $(call BuildPackage,halog))
$(eval $(call BuildPackage,haproxy-nossl))

net/haproxy/get-latest-patches.sh (+2, -2)

@ -1,7 +1,7 @@
#!/bin/sh
CLONEURL=https://git.haproxy.org/git/haproxy-2.0.git
BASE_TAG=v2.0.12
CLONEURL=https://git.haproxy.org/git/haproxy-2.1.git
BASE_TAG=v2.1.2
TMP_REPODIR=tmprepo
PATCHESDIR=patches


net/haproxy/patches/000-BUG-MINOR-checks-refine-which-errno-values-are-really-errors.patch (+101, -0)

@ -0,0 +1,101 @@
commit 4b50ea9da7c098b22a4572f7b609bed7bab03cdb
Author: Willy Tarreau <w@1wt.eu>
Date: Fri Dec 27 12:03:27 2019 +0100
BUG/MINOR: checks: refine which errno values are really errors.
Two regtests regularly fail in a random fashion depending on the machine's
load (one could really wonder if it's really worth keeping such
unreproducible tests) :
- tcp-check_multiple_ports.vtc
- 4be_1srv_smtpchk_httpchk_layer47errors.vtc
It happens that one of the reason is the time it takes to connect to
the local socket (hence the load-dependent aspect): if connect() on the
loopback returns EINPROGRESS then this status is reported instead of a
real error. Normally such a test is expected to see the error cleaned
by tcp_connect_probe() but it really depends on the timing and instead
we may very well send() first and see this error. The problem is that
everything is collected based on errno, hoping it won't get molested
in the way from the last unsuccessful syscall to wake_srv_chk(), which
obviously is hard to guarantee.
This patch at least makes sure that a few non-errors are reported as
zero just like EAGAIN. It doesn't fix the root cause but makes it less
likely to report incorrect failures.
This fix could be backported as far as 1.9.
(cherry picked from commit c8dc20a825644bb4003ecb62e0eb2d20c8eaf6c8)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/checks.c b/src/checks.c
index ac39fec91..083aebbe0 100644
--- a/src/checks.c
+++ b/src/checks.c
@@ -137,6 +137,17 @@ static const struct analyze_status analyze_statuses[HANA_STATUS_SIZE] = { /* 0:
[HANA_STATUS_HTTP_BROKEN_PIPE] = { "Close from server (http)", { 0, 1 }},
};
+/* checks if <err> is a real error for errno or one that can be ignored, and
+ * return 0 for these ones or <err> for real ones.
+ */
+static inline int unclean_errno(int err)
+{
+ if (err == EAGAIN || err == EINPROGRESS ||
+ err == EISCONN || err == EALREADY)
+ return 0;
+ return err;
+}
+
/*
* Convert check_status code to description
*/
@@ -548,7 +559,7 @@ static int retrieve_errno_from_socket(struct connection *conn)
int skerr;
socklen_t lskerr = sizeof(skerr);
- if (conn->flags & CO_FL_ERROR && ((errno && errno != EAGAIN) || !conn->ctrl))
+ if (conn->flags & CO_FL_ERROR && (unclean_errno(errno) || !conn->ctrl))
return 1;
if (!conn_ctrl_ready(conn))
@@ -557,8 +568,7 @@ static int retrieve_errno_from_socket(struct connection *conn)
if (getsockopt(conn->handle.fd, SOL_SOCKET, SO_ERROR, &skerr, &lskerr) == 0)
errno = skerr;
- if (errno == EAGAIN)
- errno = 0;
+ errno = unclean_errno(errno);
if (!errno) {
/* we could not retrieve an error, that does not mean there is
@@ -599,8 +609,8 @@ static void chk_report_conn_err(struct check *check, int errno_bck, int expired)
if (check->result != CHK_RES_UNKNOWN)
return;
- errno = errno_bck;
- if (conn && (!errno || errno == EAGAIN))
+ errno = unclean_errno(errno_bck);
+ if (conn && errno)
retrieve_errno_from_socket(conn);
if (conn && !(conn->flags & CO_FL_ERROR) &&
@@ -644,7 +654,7 @@ static void chk_report_conn_err(struct check *check, int errno_bck, int expired)
}
if (conn && conn->err_code) {
- if (errno && errno != EAGAIN)
+ if (unclean_errno(errno))
chunk_printf(&trash, "%s (%s)%s", conn_err_code_str(conn), strerror(errno),
chk->area);
else
@@ -653,7 +663,7 @@ static void chk_report_conn_err(struct check *check, int errno_bck, int expired)
err_msg = trash.area;
}
else {
- if (errno && errno != EAGAIN) {
+ if (unclean_errno(errno)) {
chunk_printf(&trash, "%s%s", strerror(errno),
chk->area);
err_msg = trash.area;
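
Editor's note: the filtering idea above can be illustrated outside HAProxy. After a connect() on a non-blocking socket, several errno values only mean "not finished yet" rather than "failed". A minimal standalone sketch of the same idea (generic POSIX/Linux C, not HAProxy's checks code):

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>

/* same idea as the patch: map "in progress" errno values to 0 */
static int unclean_errno(int err)
{
	if (err == EAGAIN || err == EINPROGRESS ||
	    err == EISCONN || err == EALREADY)
		return 0;
	return err;
}

int main(void)
{
	struct sockaddr_in sa = { .sin_family = AF_INET,
	                          .sin_port = htons(80),
	                          .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
	int fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);

	if (fd < 0)
		return 1;
	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		/* EINPROGRESS is expected on a non-blocking socket and
		 * must not be reported as a check failure.
		 */
		if (unclean_errno(errno))
			perror("connect");
		else
			printf("connect still in progress, not an error\n");
	}
	close(fd);
	return 0;
}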

net/haproxy/patches/001-BUG-MEDIUM-checks-Only-attempt-to-do-handshakes-if-the-connection-is-ready.patch (+46, -0)

@ -0,0 +1,46 @@
commit 68265b3993d68cc7af5fc0f70bcfa35d52ffa99d
Author: Olivier Houchard <cognet@ci0.org>
Date: Mon Dec 30 15:13:42 2019 +0100
BUG/MEDIUM: checks: Only attempt to do handshakes if the connection is ready.
When creating a new check connection, only attempt to add a handshake
connection if the connection has fully been initialized. It can not be the
case if a DNS resolution is still pending, and thus we don't yet have the
address for the server, as the handshake code assumes the connection is fully
initialized and would otherwise crash.
This is not ideal, the check shouldn't probably run until we have an address,
as it leads to check failures with "Socket error".
While I'm there, also add an xprt handshake if we're using socks4, otherwise
checks wouldn't be able to use socks4 properly.
This should fix github issue #430
This should be backported to 2.0 and 2.1.
(cherry picked from commit 37d7897aafc412f3c4a4a68a1dccbd6b5d6cb180)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/checks.c b/src/checks.c
index 083aebbe0..2b7fc09c6 100644
--- a/src/checks.c
+++ b/src/checks.c
@@ -1715,6 +1715,9 @@ static int connect_conn_chk(struct task *t)
if (s->check.send_proxy && !(check->state & CHK_ST_AGENT)) {
conn->send_proxy_ofs = 1;
conn->flags |= CO_FL_SEND_PROXY;
+ }
+ if (conn->flags & (CO_FL_SEND_PROXY | CO_FL_SOCKS4) &&
+ conn_ctrl_ready(conn)) {
if (xprt_add_hs(conn) < 0)
ret = SF_ERR_RESOURCE;
}
@@ -2960,7 +2963,8 @@ static int tcpcheck_main(struct check *check)
if (proto && proto->connect)
ret = proto->connect(conn,
CONNECT_HAS_DATA /* I/O polling is always needed */ | (next && next->action == TCPCHK_ACT_EXPECT) ? 0 : CONNECT_DELACK_ALWAYS);
- if (check->current_step->conn_opts & TCPCHK_OPT_SEND_PROXY) {
+ if (conn_ctrl_ready(conn) &&
+ check->current_step->conn_opts & TCPCHK_OPT_SEND_PROXY) {
conn->send_proxy_ofs = 1;
conn->flags |= CO_FL_SEND_PROXY;
if (xprt_add_hs(conn) < 0)

net/haproxy/patches/002-BUG-MEDIUM-connections-Hold-the-lock-when-wanting-to-kill-a-connection.patch (+37, -0)

@ -0,0 +1,37 @@
commit 000f227a4cfdb019575e889638f9e0e5a53bbb0b
Author: Olivier Houchard <cognet@ci0.org>
Date: Mon Dec 30 18:15:40 2019 +0100
BUG/MEDIUM: connections: Hold the lock when wanting to kill a connection.
In connect_server(), when we decide we want to kill the connection of
another thread because there are too many idle connections, hold the
toremove_lock of the corresponding thread, otherwise, there's a small race
condition where we could try to add the connection to the toremove_connections
list while it has already been free'd.
This should be backported to 2.0 and 2.1.
(cherry picked from commit 140237471e408736bb7162e68c572c710a66a526)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/backend.c b/src/backend.c
index ebc5050cb..be081a5e1 100644
--- a/src/backend.c
+++ b/src/backend.c
@@ -1295,6 +1295,7 @@ int connect_server(struct stream *s)
// see it possibly larger.
ALREADY_CHECKED(i);
+ HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
tokill_conn = MT_LIST_POP(&srv->idle_orphan_conns[i],
struct connection *, list);
if (tokill_conn) {
@@ -1305,6 +1306,7 @@ int connect_server(struct stream *s)
task_wakeup(idle_conn_cleanup[i], TASK_WOKEN_OTHER);
break;
}
+ HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
}
}
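
Editor's note: the race being closed is the usual one of detaching an element from a list shared with another thread without holding that thread's lock. A generic sketch of the pattern using plain pthread spinlocks (HAProxy's MT_LIST/HA_SPIN primitives are only mirrored conceptually here):

#include <pthread.h>

/* Generic illustration of the fix: any thread that detaches a node from a
 * list shared with another thread must hold that other thread's lock,
 * otherwise the node may be freed concurrently while we still reference it.
 */
struct node {
	struct node *next;
	int payload;
};

static struct node *idle_list;          /* shared between threads */
static pthread_spinlock_t idle_lock;    /* counterpart of toremove_lock[tid];
                                         * pthread_spin_init() assumed done at startup */

static struct node *pop_idle(void)
{
	struct node *n;

	pthread_spin_lock(&idle_lock);  /* the lock added by the fix */
	n = idle_list;
	if (n)
		idle_list = n->next;
	pthread_spin_unlock(&idle_lock);
	return n;                       /* safe to use: no longer reachable by others */
}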

net/haproxy/patches/003-MINOR-config-disable-busy-polling-on-old-processes.patch (+47, -0)

@ -0,0 +1,47 @@
commit 493c8d8d3c2f710d47b2bdd6a8ea582a84c1cf72
Author: William Dauchy <w.dauchy@criteo.com>
Date: Sat Dec 28 15:36:02 2019 +0100
MINOR: config: disable busy polling on old processes
in the context of seamless reload and busy polling, older processes will
create unnecessary cpu conflicts; we can assume there is no need for busy
polling for old processes which are waiting to be terminated.
This patch is not a bug fix itself but might be a good stability
improvement when you are in the context of frequent seamless reloads with
a high "hard-stop-after" value; for that reasons I think this patch
should be backported in all 2.x versions.
Signed-off-by: William Dauchy <w.dauchy@criteo.com>
(cherry picked from commit 3894d97fb8b66e247c5a326c6b3aa75816c597dc)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/doc/configuration.txt b/doc/configuration.txt
index d34ed808b..3f381e386 100644
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -1436,6 +1436,10 @@ busy-polling
prefixing it with the "no" keyword. It is ignored by the "select" and
"poll" pollers.
+ This option is automatically disabled on old processes in the context of
+ seamless reload; it avoids too much cpu conflicts when multiple processes
+ stay around for some time waiting for the end of their current connections.
+
max-spread-checks <delay in milliseconds>
By default, haproxy tries to spread the start of health checks across the
smallest health check interval of all the servers in a farm. The principle is
diff --git a/src/proxy.c b/src/proxy.c
index 7ba6b5839..1abd6654f 100644
--- a/src/proxy.c
+++ b/src/proxy.c
@@ -1117,6 +1117,8 @@ void soft_stop(void)
struct task *task;
stopping = 1;
+ /* disable busy polling to avoid cpu eating for the new process */
+ global.tune.options &= ~GTUNE_BUSY_POLLING;
if (tick_isset(global.hard_stop_after)) {
task = task_new(MAX_THREADS_MASK);
if (task) {

net/haproxy/patches/004-MINOR-ssl-Remove-unused-variable-need_out.patch (+39, -0)

@ -0,0 +1,39 @@
commit fbe15b7184da09c0d71051bf3978540f63aba5cc
Author: Olivier Houchard <cognet@ci0.org>
Date: Sun Jan 5 16:45:14 2020 +0100
MINOR: ssl: Remove unused variable "need_out".
The "need_out" variable was used to let the ssl code know we're done
reading early data, and we should start the handshake.
Now that the handshake function is responsible for taking care of reading
early data, all that logic has been removed from ssl_sock_to_buf(), but
need_out was forgotten, and left. Remove it now.
This patch was submitted by William Dauchy <w.dauchy@criteo.com>, and should
fix github issue #434.
This should be backported to 2.0 and 2.1.
(cherry picked from commit 7f4f7f140f6b03b61d1b38260962db235c42c121)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index 7c62299a0..1fac2d905 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -6385,7 +6385,6 @@ static size_t ssl_sock_to_buf(struct connection *conn, void *xprt_ctx, struct bu
* EINTR too.
*/
while (count > 0) {
- int need_out = 0;
try = b_contig_space(buf);
if (!try)
@@ -6443,8 +6442,6 @@ static size_t ssl_sock_to_buf(struct connection *conn, void *xprt_ctx, struct bu
/* otherwise it's a real error */
goto out_error;
}
- if (need_out)
- break;
}
leave:
return done;

net/haproxy/patches/005-BUG-MINOR-h1-Report-the-right-error-position-when-a-header-value-is-invalid.patch (+55, -0)

@ -0,0 +1,55 @@
commit e313c1bd5901b721bdfd23714c432235625a87a8
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Mon Jan 6 13:41:01 2020 +0100
BUG/MINOR: h1: Report the right error position when a header value is invalid
During H1 messages parsing, when the parser has finished to parse a full header
line, some tests are performed on its value, depending on its name, to be sure
it is valid. The content-length is checked and converted in integer and the host
header is also checked. If an error occurred during this step, the error
position must point on the header value. But from the parser point of view, we
are already on the start of the next header. Thus the effective reported
position in the error capture is the beginning of the unparsed header line. It
is a bit confusing when we try to figure out why a message is rejected.
Now, the parser state is updated to point on the invalid value. This way, the
error position really points on the right position.
This patch must be backported as far as 1.9.
(cherry picked from commit 1703478e2dd6bd12bb03b0a0fdcc7cd4a611dafc)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/h1.c b/src/h1.c
index 15827db56..63fbee8c0 100644
--- a/src/h1.c
+++ b/src/h1.c
@@ -819,6 +819,7 @@ int h1_headers_to_hdr_list(char *start, const char *stop,
if (ret < 0) {
state = H1_MSG_HDR_L2_LWS;
+ ptr = v.ptr; /* Set ptr on the error */
goto http_msg_invalid;
}
else if (ret == 0) {
@@ -841,16 +842,18 @@ int h1_headers_to_hdr_list(char *start, const char *stop,
if (authority.len && !isteqi(v, authority)) {
if (h1m->err_pos < -1) {
state = H1_MSG_HDR_L2_LWS;
+ ptr = v.ptr; /* Set ptr on the error */
goto http_msg_invalid;
}
if (h1m->err_pos == -1) /* capture the error pointer */
- h1m->err_pos = ptr - start + skip; /* >= 0 now */
+ h1m->err_pos = v.ptr - start + skip; /* >= 0 now */
}
host_idx = hdr_count;
}
else {
if (!isteqi(v, hdr[host_idx].v)) {
state = H1_MSG_HDR_L2_LWS;
+ ptr = v.ptr; /* Set ptr on the error */
goto http_msg_invalid;
}
/* if the same host, skip it */

net/haproxy/patches/006-BUG-MINOR-proxy-Fix-input-data-copy-when-an-error-is-captured.patch (+32, -0)

@ -0,0 +1,32 @@
commit 8015ba0c4a9333967059bdf7c302f7a71e5ec5ea
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Mon Jan 6 11:37:00 2020 +0100
BUG/MINOR: proxy: Fix input data copy when an error is captured
In proxy_capture_error(), input data are copied in the error snapshot. The copy
must take care of the data wrapping. But the length of the first block is
wrong. It should be the amount of contiguous input data that can be copied
starting from the input's beginning. But the minimum between the input length
and the buffer size minus the input length is used instead. So it is a problem
if input data are wrapping or if more than the half of the buffer is used by
input data.
This patch must be backported as far as 1.9.
(cherry picked from commit 47a7210b9d377d91777f39241fab54d5f83b2728)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/proxy.c b/src/proxy.c
index 1abd6654f..2d02b1b5d 100644
--- a/src/proxy.c
+++ b/src/proxy.c
@@ -1546,7 +1546,7 @@ void proxy_capture_error(struct proxy *proxy, int is_back,
es->buf_len = buf_len;
es->ev_id = ev_id;
- len1 = b_size(buf) - buf_len;
+ len1 = b_size(buf) - b_peek_ofs(buf, buf_out);
if (len1 > buf_len)
len1 = buf_len;
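
Editor's note: the copy rule the fix restores is the standard one for reading out of a wrapping (circular) buffer: the first memcpy length is the contiguous span from the read offset to the end of storage, and only the remainder wraps around. A small generic sketch, independent of HAProxy's buffer API:

#include <string.h>
#include <stddef.h>

/* Copy <len> bytes out of a circular buffer of <size> bytes, starting at
 * read offset <ofs>. The first chunk must be the contiguous span up to the
 * end of the storage area; anything left wraps to the beginning.
 */
static void copy_wrapping(char *dst, const char *buf, size_t size,
                          size_t ofs, size_t len)
{
	size_t len1 = size - ofs;     /* contiguous bytes before the wrap point */

	if (len1 > len)
		len1 = len;
	memcpy(dst, buf + ofs, len1);
	if (len > len1)               /* wrapped part, if any */
		memcpy(dst + len1, buf, len - len1);
}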

net/haproxy/patches/007-BUG-MEDIUM-http-ana-Truncate-the-response-when-a-redirect-rule-is-applied.patch (+40, -0)

@ -0,0 +1,40 @@
commit 219f7cb9e3eb061103c3c013a6ecf13d38281247
Author: Kevin Zhu <ip0tcp@gmail.com>
Date: Tue Jan 7 09:42:55 2020 +0100
BUG/MEDIUM: http-ana: Truncate the response when a redirect rule is applied
When a redirect rule is executed on the response path, we must truncate the
received response. Otherwise, the redirect is appended after the response, which
is sent to the client. So it is obviously a bug because the redirect is not
performed. With bodyless responses, it is the "only" bug. But if the response
has a body, the result may be invalid. If the payload is not fully received yet
when the redirect is performed, an internal error is reported.
It must be backported as far as 1.9.
(cherry picked from commit 96b363963f4a4a63823718966798f177a72936b6)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/http_ana.c b/src/http_ana.c
index ee00d2c76..268796d2e 100644
--- a/src/http_ana.c
+++ b/src/http_ana.c
@@ -2526,6 +2526,8 @@ int http_apply_redirect_rule(struct redirect_rule *rule, struct stream *s, struc
close = 1;
htx = htx_from_buf(&res->buf);
+ /* Trim any possible response */
+ channel_htx_truncate(&s->res, htx);
flags = (HTX_SL_F_IS_RESP|HTX_SL_F_VER_11|HTX_SL_F_XFER_LEN|HTX_SL_F_BODYLESS);
sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags, ist("HTTP/1.1"), status, reason);
if (!sl)
@@ -2553,6 +2555,8 @@ int http_apply_redirect_rule(struct redirect_rule *rule, struct stream *s, struc
if (!htx_add_endof(htx, HTX_BLK_EOH) || !htx_add_endof(htx, HTX_BLK_EOM))
goto fail;
+ htx_to_buf(htx, &res->buf);
+
/* let's log the request time */
s->logs.tv_request = now;

net/haproxy/patches/008-BUG-MINOR-channel-inject-output-data-at-the-end-of-output.patch (+34, -0)

@ -0,0 +1,34 @@
commit a96cbaa1e30e23bf91b7a4fb46857b4b2823deea
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Tue Jan 7 10:01:57 2020 +0100
BUG/MINOR: channel: inject output data at the end of output
In co_inject(), data must be inserted at the end of output, not the end of
input. For the record, this function does not take care of input data which are
supposed to not exist. But the caller may reset input data after or before the
call. It is its own choice.
This bug, among other effects, is visible when a redirect is performed on
the response path, on legacy HTTP mode (so for HAProxy < 2.1). The redirect
response is appended after the server response when it should overwrite it.
Thanks to Kevin Zhu <ip0tcp@gmail.com> to report the bug. It must be backported
as far as 1.9.
(cherry picked from commit 584348be636fcc9f41b80ef0fde03c7899d75cd7)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/channel.c b/src/channel.c
index d4a46ffed..8b0854ef5 100644
--- a/src/channel.c
+++ b/src/channel.c
@@ -96,7 +96,7 @@ int co_inject(struct channel *chn, const char *msg, int len)
if (len > max)
return max;
- memcpy(ci_tail(chn), msg, len);
+ memcpy(co_tail(chn), msg, len);
b_add(&chn->buf, len);
c_adv(chn, len);
chn->total += len;

net/haproxy/patches/009-BUG-MEDIUM-session-do-not-report-a-failure-when-rejecting-a-session.patch (+39, -0)

@ -0,0 +1,39 @@
commit 1d12549a19c06f84c934c87487a58b8f63d205ea
Author: Willy Tarreau <w@1wt.eu>
Date: Tue Jan 7 18:03:09 2020 +0100
BUG/MEDIUM: session: do not report a failure when rejecting a session
In session_accept_fd() we can perform a synchronous call to
conn_complete_session() and if it succeeds the connection is accepted
and turned into a session. If it fails we take it as an error while it
is not, in this case, it's just that a tcp-request rule has decided to
reject the incoming connection. The problem with reporting such an event
as an error is that the failed status is passed down to the listener code
which decides to disable accept() for 100ms in order to leave some time
for transient issues to vanish, and that's not what we want to do here.
This fix must be backported as far as 1.7. In 1.7 the code is a bit
different as tcp_exec_l5_rules() is called directly from within
session_new_fd() and ret=0 must be assigned there.
(cherry picked from commit e5891ca6c14c46d5f3a2169ede75b7fbb225216f)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/session.c b/src/session.c
index 126ba78a6..111fc61e3 100644
--- a/src/session.c
+++ b/src/session.c
@@ -288,6 +288,12 @@ int session_accept_fd(struct listener *l, int cfd, struct sockaddr_storage *addr
if (conn_complete_session(cli_conn) >= 0)
return 1;
+ /* if we reach here we have deliberately decided not to keep this
+ * session (e.g. tcp-request rule), so that's not an error we should
+ * try to protect against.
+ */
+ ret = 0;
+
/* error unrolling */
out_free_sess:
/* prevent call to listener_release during session_free. It will be

net/haproxy/patches/010-BUG-MINOR-stream-int-Dont-trigger-L7-retry-if-max-retries-is-already-reached.patch (+37, -0)

@ -0,0 +1,37 @@
commit 48cd95b6a516562af382930adcc0eabfdb652487
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Thu Jan 9 14:31:13 2020 +0100
BUG/MINOR: stream-int: Don't trigger L7 retry if max retries is already reached
When an HTTP response is received, at the stream-interface level, if a L7 retry
must be triggered because of the status code, the response is trashed and a read
error is reported on the response channel. Then the stream handles this error
and perform the retry. Except if the maximum connection retries is reached. In
this case, an error is reported. Because the server response was already trashed
by the stream-interface, a generic 502 error is returned to the client instead
of the server's one.
Now, the stream-interface triggers a L7 retry only if the maximum connection
retries is not already reached. Thus, at the end, the last server's response is
returned.
This patch must be backported to 2.1 and 2.0. It should fix the issue #439.
(cherry picked from commit 48726b78e57a69bfcdce624a3a5905c781d5eec0)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/stream_interface.c b/src/stream_interface.c
index 1d84ca9ad..012ac71e0 100644
--- a/src/stream_interface.c
+++ b/src/stream_interface.c
@@ -1372,7 +1372,8 @@ int si_cs_recv(struct conn_stream *cs)
break;
}
- if (si->flags & SI_FL_L7_RETRY) {
+ /* L7 retries enabled and maximum connection retries not reached */
+ if ((si->flags & SI_FL_L7_RETRY) && si->conn_retries) {
struct htx *htx;
struct htx_sl *sl;

net/haproxy/patches/011-BUG-MEDIUM-tasks-Use-the-MT-macros-in-tasklet_free.patch (+30, -0)

@ -0,0 +1,30 @@
commit 5e06a678544b1fde2517a10041e802265f098e0b
Author: Olivier Houchard <ohouchard@haproxy.com>
Date: Fri Jan 10 16:46:48 2020 +0100
BUG/MEDIUM: tasks: Use the MT macros in tasklet_free().
In tasklet_free(), to attempt to remove ourself, use MT_LIST_DEL, we can't
just use LIST_DEL(), as we theoretically could be in the shared tasklet list.
This should be backported to 2.1.
(cherry picked from commit 3c4f40acbf6cd33b874b224a89ee2a64eb3035d5)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/include/proto/task.h b/include/proto/task.h
index bba71930c..f237d0dd2 100644
--- a/include/proto/task.h
+++ b/include/proto/task.h
@@ -397,10 +397,8 @@ static inline void task_destroy(struct task *t)
/* Should only be called by the thread responsible for the tasklet */
static inline void tasklet_free(struct tasklet *tl)
{
- if (!LIST_ISEMPTY(&tl->list)) {
- LIST_DEL(&tl->list);
+ if (MT_LIST_DEL((struct mt_list *)&tl->list))
_HA_ATOMIC_SUB(&tasks_run_queue, 1);
- }
pool_free(pool_head_tasklet, tl);
if (unlikely(stopping))

net/haproxy/patches/012-BUG-MINOR-mux-h2-use-a-safe-list_for_each_entry-in-h2_send.patch (+153, -0)

@ -0,0 +1,153 @@
commit 449f28322fb45688dacc80bead89fe75f3dd75db
Author: Willy Tarreau <w@1wt.eu>
Date: Fri Jan 10 17:01:29 2020 +0100
BUG/MINOR: mux-h2: use a safe list_for_each_entry in h2_send()
h2_send() uses list_for_each_entry() to scan paused streams and resume
them, but happily deletes any leftover from a previous failed unsubscribe,
which is obviously not safe and would corrupt the list. In practice this
is a proof that this doesn't happen, but it's not the best way to prove it.
In order to fix this and reduce the maintenance burden caused by code
duplication (this list walk exists at 3 places), let's introduce a new
function h2_resume_each_sending_h2s() doing exactly this and use it at
all 3 places.
This bug was introduced as a side effect of fix 998410a41b ("BUG/MEDIUM:
h2: Revamp the way send subscriptions works.") so it should be backported
as far as 1.9.
(cherry picked from commit 989539b048bef502a474553a8e330a3d318edb6c)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/mux_h2.c b/src/mux_h2.c
index be9dae928..92a50da24 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -3251,13 +3251,41 @@ static void h2_process_demux(struct h2c *h2c)
TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
}
+/* resume each h2s eligible for sending in list head <head> */
+static void h2_resume_each_sending_h2s(struct h2c *h2c, struct list *head)
+{
+ struct h2s *h2s, *h2s_back;
+
+ TRACE_ENTER(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
+
+ list_for_each_entry_safe(h2s, h2s_back, head, list) {
+ if (h2c->mws <= 0 ||
+ h2c->flags & H2_CF_MUX_BLOCK_ANY ||
+ h2c->st0 >= H2_CS_ERROR)
+ break;
+
+ h2s->flags &= ~H2_SF_BLK_ANY;
+ /* For some reason, the upper layer failed to subscribe again,
+ * so remove it from the send_list
+ */
+ if (!h2s->send_wait) {
+ LIST_DEL_INIT(&h2s->list);
+ continue;
+ }
+
+ h2s->send_wait->events &= ~SUB_RETRY_SEND;
+ LIST_ADDQ(&h2c->sending_list, &h2s->sending_list);
+ tasklet_wakeup(h2s->send_wait->tasklet);
+ }
+
+ TRACE_LEAVE(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
+}
+
/* process Tx frames from streams to be multiplexed. Returns > 0 if it reached
* the end.
*/
static int h2_process_mux(struct h2c *h2c)
{
- struct h2s *h2s, *h2s_back;
-
TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
if (unlikely(h2c->st0 < H2_CS_FRAME_H)) {
@@ -3290,47 +3318,8 @@ static int h2_process_mux(struct h2c *h2c)
* waiting there were already elected for immediate emission but were
* blocked just on this.
*/
-
- list_for_each_entry_safe(h2s, h2s_back, &h2c->fctl_list, list) {
- if (h2c->mws <= 0 || h2c->flags & H2_CF_MUX_BLOCK_ANY ||
- h2c->st0 >= H2_CS_ERROR)
- break;
-
- if (LIST_ADDED(&h2s->sending_list))
- continue;
-
- h2s->flags &= ~H2_SF_BLK_ANY;
- /* For some reason, the upper layer failed to subsribe again,
- * so remove it from the send_list
- */
- if (!h2s->send_wait) {
- LIST_DEL_INIT(&h2s->list);
- continue;
- }
- h2s->send_wait->events &= ~SUB_RETRY_SEND;
- LIST_ADDQ(&h2c->sending_list, &h2s->sending_list);
- tasklet_wakeup(h2s->send_wait->tasklet);
- }
-
- list_for_each_entry_safe(h2s, h2s_back, &h2c->send_list, list) {
- if (h2c->st0 >= H2_CS_ERROR || h2c->flags & H2_CF_MUX_BLOCK_ANY)
- break;
-
- if (LIST_ADDED(&h2s->sending_list))
- continue;
-
- /* For some reason, the upper layer failed to subsribe again,
- * so remove it from the send_list
- */
- if (!h2s->send_wait) {
- LIST_DEL_INIT(&h2s->list);
- continue;
- }
- h2s->flags &= ~H2_SF_BLK_ANY;
- h2s->send_wait->events &= ~SUB_RETRY_SEND;
- LIST_ADDQ(&h2c->sending_list, &h2s->sending_list);
- tasklet_wakeup(h2s->send_wait->tasklet);
- }
+ h2_resume_each_sending_h2s(h2c, &h2c->fctl_list);
+ h2_resume_each_sending_h2s(h2c, &h2c->send_list);
fail:
if (unlikely(h2c->st0 >= H2_CS_ERROR)) {
@@ -3514,30 +3503,9 @@ static int h2_send(struct h2c *h2c)
/* We're not full anymore, so we can wake any task that are waiting
* for us.
*/
- if (!(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM)) && h2c->st0 >= H2_CS_FRAME_H) {
- struct h2s *h2s;
-
- list_for_each_entry(h2s, &h2c->send_list, list) {
- if (h2c->st0 >= H2_CS_ERROR || h2c->flags & H2_CF_MUX_BLOCK_ANY)
- break;
-
- if (LIST_ADDED(&h2s->sending_list))
- continue;
+ if (!(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM)) && h2c->st0 >= H2_CS_FRAME_H)
+ h2_resume_each_sending_h2s(h2c, &h2c->send_list);
- /* For some reason, the upper layer failed to subsribe again,
- * so remove it from the send_list
- */
- if (!h2s->send_wait) {
- LIST_DEL_INIT(&h2s->list);
- continue;
- }
- h2s->flags &= ~H2_SF_BLK_ANY;
- h2s->send_wait->events &= ~SUB_RETRY_SEND;
- TRACE_DEVEL("waking up pending stream", H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn, h2s);
- tasklet_wakeup(h2s->send_wait->tasklet);
- LIST_ADDQ(&h2c->sending_list, &h2s->sending_list);
- }
- }
/* We're done, no more to send */
if (!br_data(h2c->mbuf)) {
TRACE_DEVEL("leaving with everything sent", H2_EV_H2C_SEND, h2c->conn);
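
Editor's note: the underlying bug class is deleting the element a non-safe iterator is currently standing on. What the _safe iterator variants provide can be shown with a plain singly linked list: capture the next pointer before the loop body may unlink and free the current node. A generic sketch (not HAProxy's list API):

#include <stdlib.h>

struct item {
	struct item *next;
	int blocked;
};

/* Unsafe pattern: the body frees "cur", then the loop dereferences it.
 * Safe pattern (what list_for_each_entry_safe() provides): keep the next
 * pointer aside before the body may delete the current element.
 */
static void resume_all(struct item **head)
{
	struct item *cur = *head, *back;        /* "back" plays the role of h2s_back */
	struct item **prev = head;

	while (cur) {
		back = cur->next;                /* saved before any deletion */
		if (!cur->blocked) {
			*prev = back;            /* unlink and free the finished entry */
			free(cur);
		} else {
			prev = &cur->next;
		}
		cur = back;                      /* still valid even if cur was freed */
	}
}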

net/haproxy/patches/013-BUG-MEDIUM-mux-h2-fix-missing-test-on-sending_list-in-previous-patch.patch (+32, -0)

@ -0,0 +1,32 @@
commit d6c19ac2c0458445e521fd08eded304c26eecfe7
Author: Willy Tarreau <w@1wt.eu>
Date: Fri Jan 10 18:20:15 2020 +0100
BUG/MEDIUM: mux-h2: fix missing test on sending_list in previous patch
Previous commit 989539b048 ("BUG/MINOR: mux-h2: use a safe
list_for_each_entry in h2_send()") accidently lost its sending_list test,
resulting in some elements to be woken up again while already in the
sending_list and h2_unsubscribe() crashing on integrity tests (only
when built with DEBUG_DEV).
If the fix above is backported this one must be as well.
(cherry picked from commit 70c5b0e5fd5ad243f4645b37a0f89068de97e90e)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/mux_h2.c b/src/mux_h2.c
index 92a50da24..d46a316ac 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -3265,6 +3265,10 @@ static void h2_resume_each_sending_h2s(struct h2c *h2c, struct list *head)
break;
h2s->flags &= ~H2_SF_BLK_ANY;
+
+ if (LIST_ADDED(&h2s->sending_list))
+ continue;
+
/* For some reason, the upper layer failed to subscribe again,
* so remove it from the send_list
*/

net/haproxy/patches/014-BUG-MEDIUM-mux-h2-dont-stop-sending-when-crossing-a-buffer-boundary.patch (+79, -0)

@ -0,0 +1,79 @@
commit eb134e46e41b06f6022f1c9a481205a8180515bd
Author: Willy Tarreau <w@1wt.eu>
Date: Tue Jan 14 11:42:59 2020 +0100
BUG/MEDIUM: mux-h2: don't stop sending when crossing a buffer boundary
In version 2.0, after commit 9c218e7521 ("MAJOR: mux-h2: switch to next
mux buffer on buffer full condition."), the H2 mux started to use a ring
buffer for the output data in order to reduce competition between streams.
However, one corner case was suboptimally covered: when crossing a buffer
boundary, we have to shrink the outgoing frame size to the one left in
the output buffer, but this shorter size is later used as a signal of
incomplete send due to a buffer full condition (which used to be true when
using a single buffer). As a result, function h2s_frt_make_resp_data()
used to return less than requested, which in turn would cause h2_snd_buf()
to stop sending and leave some unsent data in the buffer, and si_cs_send()
to subscribe for sending more later.
But it goes a bit further than this, because subscribing to send again
causes the mux's send_list not to be empty anymore, hence extra streams
can be denied the access to the mux till the first stream is woken again.
This causes a nasty wakeup-sleep dance between streams that makes it
totally impractical to try to remove the sending list. A test showed
that it was possible to observe 3 million h2_snd_buf() giveups for only
100k requests when using 100 concurrent streams on 20kB objects.
It doesn't seem likely that a stream could get blocked and time out due
to this bug, though it's not possible either to demonstrate the opposite.
One risk is that incompletely sent streams do not have any blocking flags
so they may not be identified as blocked. However on first scan of the
send_list they meet all conditions for a wakeup.
This patch simply allows to continue on a new frame after a partial
frame. With only this change, the number of failed h2_snd_buf() was
divided by 800 (4% of calls). And by slightly increasing the H2C_MBUF_CNT
size, it can go down to zero.
This fix must be backported to 2.1 and 2.0.
(cherry picked from commit c7ce4e3e7fb2d7f9f037b4df318df7d6e23e8f7a)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/mux_h2.c b/src/mux_h2.c
index d46a316ac..8a82f60fd 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -5157,6 +5157,7 @@ static size_t h2s_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, size_t
struct htx_blk *blk;
enum htx_blk_type type;
int idx;
+ int trunc_out; /* non-zero if truncated on out buf */
TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
@@ -5183,6 +5184,7 @@ static size_t h2s_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, size_t
type = htx_get_blk_type(blk); // DATA or EOM
bsize = htx_get_blksz(blk);
fsize = bsize;
+ trunc_out = 0;
if (type == HTX_BLK_EOM) {
if (h2s->flags & H2_SF_ES_SENT) {
@@ -5345,6 +5347,7 @@ static size_t h2s_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, size_t
b_data(mbuf) <= MAX_DATA_REALIGN)
goto realign_again;
fsize = outbuf.size - 9;
+ trunc_out = 1;
if (fsize <= 0) {
/* no need to send an empty frame here */
@@ -5402,6 +5405,8 @@ static size_t h2s_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, size_t
} else {
/* we've truncated this block */
htx_cut_data_blk(htx, blk, fsize);
+ if (trunc_out)
+ goto new_frame;
}
if (es_now) {
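
Editor's note: the fix boils down to this rule: a frame truncated by the end of the current output area is not a "buffer full" condition, so keep emitting new frames instead of returning short. A generic sketch of that loop, with contig_room() and emit_frame() as hypothetical helpers standing in for the mux's buffer handling:

#include <stddef.h>

/* Split <len> bytes of payload into frames, each limited by the contiguous
 * room currently available in the output area, and keep going after a
 * truncated frame instead of stopping at the first boundary.
 */
size_t send_all(const char *payload, size_t len,
                size_t (*contig_room)(void),
                void (*emit_frame)(const char *, size_t))
{
	size_t sent = 0;

	while (sent < len) {
		size_t room = contig_room();
		size_t fsize = len - sent;

		if (room == 0)
			break;                   /* genuinely full: stop and retry later */
		if (fsize > room)
			fsize = room;            /* truncated frame at a boundary */
		emit_frame(payload + sent, fsize);
		sent += fsize;                   /* and continue with a new frame */
	}
	return sent;
}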

net/haproxy/patches/015-BUG-MINOR-cli-mworker-cant-start-haproxy-with-2-programs.patch (+37, -0)

@ -0,0 +1,37 @@
commit 796ff4d975bde2bb7fda2876a31bbdc697c2b2ba
Author: William Lallemand <wlallemand@haproxy.com>
Date: Tue Jan 14 15:25:02 2020 +0100
BUG/MINOR: cli/mworker: can't start haproxy with 2 programs
When trying to start HAProxy with the master CLI and more than one
program in the configuration, it refuses to start with:
[ALERT] 013/132926 (1378) : parsing [cur--1:0] : proxy 'MASTER', another server named 'cur--1' was already defined at line 0, please use distinct names.
[ALERT] 013/132926 (1378) : Fatal errors found in configuration.
The problem is that haproxy tries to create a server for the MASTER
proxy but only the workers are supposed to be in the server list.
Fix issue #446.
Must be backported as far as 2.0.
(cherry picked from commit a31b09e982a76cdf8761edb25d1569cb76a8ff37)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/cli.c b/src/cli.c
index 77db8be88..d68e2b299 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -2455,6 +2455,10 @@ int mworker_cli_proxy_create()
int port1, port2, port;
struct protocol *proto;
+ /* only the workers support the master CLI */
+ if (!(child->options & PROC_O_TYPE_WORKER))
+ continue;
+
newsrv = new_server(mworker_proxy);
if (!newsrv)
goto error;

net/haproxy/patches/016-REGTEST-mcli-mcli_start_progs-start-2-programs.patch (+60, -0)

@ -0,0 +1,60 @@
commit df8af5b0953791cb27c73a7f67b8101fedab4ca0
Author: William Lallemand <wlallemand@haproxy.com>
Date: Tue Jan 14 15:38:43 2020 +0100
REGTEST: mcli/mcli_start_progs: start 2 programs
This regtest tests the issue #446 by starting 2 programs and checking if
they exist in the "show proc" of the master CLI.
Should be backported as far as 2.0.
(cherry picked from commit 25b569302167e71b32e569a2366027e8e320e80a)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/reg-tests/mcli/mcli_start_progs.vtc b/reg-tests/mcli/mcli_start_progs.vtc
new file mode 100644
index 000000000..bda9b9601
--- /dev/null
+++ b/reg-tests/mcli/mcli_start_progs.vtc
@@ -0,0 +1,40 @@
+varnishtest "Try to start a master CLI with 2 programs"
+#REGTEST_TYPE=bug
+#REQUIRE_VERSION=2.0
+#REQUIRE_BINARIES=sleep
+
+feature ignore_unknown_macro
+
+# Do nothing. Is there only to create s1_* macros
+server s1 {
+} -start
+
+haproxy h1 -W -S -conf {
+ global
+ nbproc 1
+ defaults
+ mode http
+ ${no-htx} option http-use-htx
+ timeout connect 1s
+ timeout client 1s
+ timeout server 1s
+
+ frontend myfrontend
+ bind "fd@${my_fe}"
+ default_backend test
+
+ backend test
+ server www1 ${s1_addr}:${s1_port}
+
+ program foo
+ command sleep 10
+
+ program bar
+ command sleep 10
+
+} -start
+
+haproxy h1 -mcli {
+ send "show proc"
+ expect ~ ".*foo.*\n.*bar.*\n"
+} -wait

net/haproxy/patches/017-BUG-MEDIUM-mworker-remain-in-mworker-mode-during-reload.patch (+37, -0)

@ -0,0 +1,37 @@
commit 6869fa88766cdb07564f321905c39f191da9035b
Author: William Lallemand <wlallemand@haproxy.com>
Date: Tue Jan 14 17:58:18 2020 +0100
BUG/MEDIUM: mworker: remain in mworker mode during reload
If you reload an haproxy started in master-worker mode with
"master-worker" in the configuration, and no "-W" argument,
the new process lost the fact that it was in master-worker mode
resulting in weird behaviors.
The biggest problem is that if it is reloaded with a bad configuration,
the master will exits instead of remaining in waitpid mode.
This problem was discovered in bug #443.
Should be backported in every version using the master-worker mode.
(as far as 1.8)
(cherry picked from commit 24c928c8bd86f6899d39dd5cd04b3e50b4b993a8)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/haproxy.c b/src/haproxy.c
index 10ba128d0..a66a184dc 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -1501,6 +1501,10 @@ static void init(int argc, char **argv)
memcpy(localpeer, hostname, (sizeof(hostname) > sizeof(localpeer) ? sizeof(localpeer) : sizeof(hostname)) - 1);
setenv("HAPROXY_LOCALPEER", localpeer, 1);
+ /* we were in mworker mode, we should restart in mworker mode */
+ if (getenv("HAPROXY_MWORKER_REEXEC") != NULL)
+ global.mode |= MODE_MWORKER;
+
/*
* Initialize the previously static variables.
*/

net/haproxy/patches/018-BUG-MEDIUM-mux_h1-Dont-call-h1_send-if-we-subscribed.patch (+31, -0)

@ -0,0 +1,31 @@
commit da6d362ab057217dfdd61a581c6596af4d0ac767
Author: Olivier Houchard <cognet@ci0.org>
Date: Wed Jan 15 19:13:32 2020 +0100
BUG/MEDIUM: mux_h1: Don't call h1_send if we subscribed().
In h1_snd_buf(), only attempt to call h1_send() if we haven't
already subscribed.
It makes no sense to do it if we subscribed, as we know we failed
to send before, and will create a useless call to sendto(), and
in 2.2, the call to raw_sock_from_buf() will disable polling if
it is enabled.
This should be backported to 2.2, 2.1, 2.0 and 1.9.
(cherry picked from commit 68787ef70a2e0fe19d0ab753dab8ed5c90cb4398)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/mux_h1.c b/src/mux_h1.c
index b44204845..d93a7eab5 100644
--- a/src/mux_h1.c
+++ b/src/mux_h1.c
@@ -2670,7 +2670,7 @@ static size_t h1_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t coun
break;
total += ret;
count -= ret;
- if (!h1_send(h1c))
+ if ((h1c->wait_event.events & SUB_RETRY_SEND) || !h1_send(h1c))
break;
}

net/haproxy/patches/019-BUG-MAJOR-hashes-fix-the-signedness-of-the-hash-inputs.patch (+141, -0)

@ -0,0 +1,141 @@
commit e1275ddb8b427c88fb3cb3d8f7cd6ec576ce1e2d
Author: Willy Tarreau <w@1wt.eu>
Date: Wed Jan 15 10:54:42 2020 +0100
BUG/MAJOR: hashes: fix the signedness of the hash inputs
Wietse Venema reported in the thread below that we have a signedness
issue with our hashes implementations: due to the use of const char*
for the input key that's often text, the crc32, sdbm, djb2, and wt6
algorithms return a platform-dependent value for binary input keys
containing bytes with bit 7 set. This means that an ARM or PPC
platform will hash binary inputs differently from an x86 typically.
Worse, some algorithms are well defined in the industry (like CRC32)
and do not provide the expected result on x86, possibly causing
interoperability issues (e.g. a user-agent would fail to compare the
CRC32 of a message body against the one computed by haproxy).
Fortunately, and contrary to the first impression, the CRC32c variant
used in the PROXY protocol processing is not affected. Thus the impact
remains very limited (the vast majority of input keys are text-based,
such as user-agent headers for example).
This patch addresses the issue by fixing all hash functions' prototypes
(even those not affected, for API consistency). A reg test will follow
in another patch.
The vast majority of users do not use these hashes. And among those
using them, very few will pass them on binary inputs. However, for the
rare ones doing it, this fix MAY have an impact during the upgrade. For
example if the package is upgraded on one LB then on another one, and
the CRC32 of a binary input is used as a stick table key (why?) then
these CRCs will not match between both nodes. Similarly, if
"hash-type ... crc32" is used, LB inconsistency may appear during the
transition. For this reason it is preferable to apply the patch on all
nodes using such hashes at the same time. Systems upgraded via their
distros will likely observe the least impact since they're expected to
be upgraded within a short time frame.
And it is important for distros NOT to skip this fix, in order to avoid
distributing an incompatible implementation of a hash. This is the
reason why this patch is tagged as MAJOR, even though it's extremely
unlikely that anyone will ever notice a change at all.
This patch must be backported to all supported branches since the
hashes were introduced in 1.5-dev20 (commit 98634f0c). Some parts
may be dropped since implemented later.
Link to Wietse's report:
https://marc.info/?l=postfix-users&m=157879464518535&w=2
(cherry picked from commit 340b07e8686ed0095291e937628d064bdcc7a3dd)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/include/common/hash.h b/include/common/hash.h
index 78fd87b96..c17f8c9ff 100644
--- a/include/common/hash.h
+++ b/include/common/hash.h
@@ -24,10 +24,10 @@
#include <inttypes.h>
-unsigned int hash_djb2(const char *key, int len);
-unsigned int hash_wt6(const char *key, int len);
-unsigned int hash_sdbm(const char *key, int len);
-unsigned int hash_crc32(const char *key, int len);
-uint32_t hash_crc32c(const char *key, int len);
+unsigned int hash_djb2(const void *input, int len);
+unsigned int hash_wt6(const void *input, int len);
+unsigned int hash_sdbm(const void *input, int len);
+unsigned int hash_crc32(const void *input, int len);
+uint32_t hash_crc32c(const void *input, int len);
#endif /* _COMMON_HASH_H_ */
diff --git a/src/hash.c b/src/hash.c
index 70451579b..8984ef36d 100644
--- a/src/hash.c
+++ b/src/hash.c
@@ -17,8 +17,9 @@
#include <common/hash.h>
-unsigned int hash_wt6(const char *key, int len)
+unsigned int hash_wt6(const void *input, int len)
{
+ const unsigned char *key = input;
unsigned h0 = 0xa53c965aUL;
unsigned h1 = 0x5ca6953aUL;
unsigned step0 = 6;
@@ -27,7 +28,7 @@ unsigned int hash_wt6(const char *key, int len)
for (; len > 0; len--) {
unsigned int t;
- t = ((unsigned int)*key);
+ t = *key;
key++;
h0 = ~(h0 ^ t);
@@ -44,8 +45,9 @@ unsigned int hash_wt6(const char *key, int len)
return h0 ^ h1;
}
-unsigned int hash_djb2(const char *key, int len)
+unsigned int hash_djb2(const void *input, int len)
{
+ const unsigned char *key = input;
unsigned int hash = 5381;
/* the hash unrolled eight times */
@@ -72,8 +74,9 @@ unsigned int hash_djb2(const char *key, int len)
return hash;
}
-unsigned int hash_sdbm(const char *key, int len)
+unsigned int hash_sdbm(const void *input, int len)
{
+ const unsigned char *key = input;
unsigned int hash = 0;
int c;
@@ -92,8 +95,9 @@ unsigned int hash_sdbm(const char *key, int len)
* this hash already sustains gigabit speed which is far faster than what
* we'd ever need. Better preserve the CPU's cache instead.
*/
-unsigned int hash_crc32(const char *key, int len)
+unsigned int hash_crc32(const void *input, int len)
{
+ const unsigned char *key = input;
unsigned int hash;
int bit;
@@ -174,8 +178,9 @@ static const uint32_t crctable[256] = {
0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L, 0xAD7D5351L
};
-uint32_t hash_crc32c(const char *buf, int len)
+uint32_t hash_crc32c(const void *input, int len)
{
+ const unsigned char *buf = input;
uint32_t crc = 0xffffffff;
while (len-- > 0) {
crc = (crc >> 8) ^ crctable[(crc ^ (*buf++)) & 0xff];
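
Editor's note: the signedness issue is easy to reproduce in isolation. With a plain (possibly signed) char pointer, bytes with bit 7 set sign-extend to negative values and change the hash arithmetic, while an unsigned char pointer gives the same result on every platform. A standalone check using the textbook djb2 recurrence (illustrative only; HAProxy's hash_djb2 is an unrolled variant):

#include <stdio.h>

/* djb2 over a signed vs. an unsigned key pointer: for inputs with bit 7 set
 * the two differ wherever plain char is signed, which is exactly the
 * portability problem the patch fixes.
 */
static unsigned int djb2_signed(const char *key, int len)
{
	unsigned int hash = 5381;
	while (len--)
		hash = ((hash << 5) + hash) + *key++;   /* *key may sign-extend */
	return hash;
}

static unsigned int djb2_unsigned(const void *input, int len)
{
	const unsigned char *key = input;
	unsigned int hash = 5381;
	while (len--)
		hash = ((hash << 5) + hash) + *key++;   /* always 0..255 */
	return hash;
}

int main(void)
{
	const char buf[2] = { '\x80', '\xff' };         /* bytes with bit 7 set */

	printf("signed:   %u\n", djb2_signed(buf, 2));
	printf("unsigned: %u\n", djb2_unsigned(buf, 2));
	return 0;
}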

net/haproxy/patches/020-REGTEST-add-sample_fetches-hashes-vtc-to-validate-hashes.patch (+127, -0)

@ -0,0 +1,127 @@
commit 2a8ac8c1000ffe0e2e874146aed012409a0ce251
Author: Willy Tarreau <w@1wt.eu>
Date: Wed Jan 15 11:31:01 2020 +0100
REGTEST: add sample_fetches/hashes.vtc to validate hashes
This regtest validates all hashes that we support, on all input bytes from
0x00 to 0xFF. Those supporting avalanche are tested as well. It also tests
len(), hex() and base64(). It purposely does not enable sha2() because this
one relies on OpenSSL and there's no point in validating that OpenSSL knows
how to hash, what matters is that we can test our hashing functions in all
cases. However since the tests were written, they're still present and
commented out in case that helps.
It may be backported to supported versions, possibly dropping a few algos
that were not supported (e.g. crc32c requires 1.9 minimum).
Note that this test will fail on crc32/djb2/sdbm/wt6 unless patches
"BUG/MINOR: stream: init variables when the list is empty" and
"BUG/MAJOR: hashes: fix the signedness of the hash inputs" are included.
(cherry picked from commit ec9ac54982841d49859747f6a535bf7444284bc3)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/reg-tests/sample_fetches/hashes.vtc b/reg-tests/sample_fetches/hashes.vtc
new file mode 100644
index 000000000..874f81e41
--- /dev/null
+++ b/reg-tests/sample_fetches/hashes.vtc
@@ -0,0 +1,97 @@
+varnishtest "Hash validity test"
+
+#REQUIRE_VERSION=1.9
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect 1s
+ timeout client 1s
+ timeout server 1s
+
+ frontend fe
+ bind "fd@${fe}"
+
+ # base64 encoding of \x00\x01\x02...\xFF
+ http-response set-var(res.key) "str(AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn+AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6/wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/w==),b64dec"
+
+ # length (start:0, next:255)
+ http-response set-header x-len0 "%[var(res.key),length]"
+ http-response set-header x-len1 "%[var(res.key),bytes(1),length]"
+
+ # text-based encoding
+ http-response set-header x-hex "%[var(res.key),hex]"
+ http-response set-header x-b64 "%[var(res.key),base64]"
+
+ # SHA family
+ http-response set-header x-sha1 "%[var(res.key),sha1,hex]"
+ #http-response set-header x-sha2 "%[var(res.key),sha2,hex]"
+ #http-response set-header x-sha2-224 "%[var(res.key),sha2(224),hex]"
+ #http-response set-header x-sha2-256 "%[var(res.key),sha2(256),hex]"
+ #http-response set-header x-sha2-384 "%[var(res.key),sha2(384),hex]"
+ #http-response set-header x-sha2-512 "%[var(res.key),sha2(512),hex]"
+
+ # 32-bit hashes, and their avalanche variants
+ http-response set-header x-crc32 "%[var(res.key),crc32]"
+ http-response set-header x-crc32-1 "%[var(res.key),crc32(1)]"
+
+ http-response set-header x-crc32c "%[var(res.key),crc32c]"
+ http-response set-header x-crc32c-1 "%[var(res.key),crc32c(1)]"
+
+ http-response set-header x-djb2 "%[var(res.key),djb2]"
+ http-response set-header x-djb2-1 "%[var(res.key),djb2(1)]"
+
+ http-response set-header x-sdbm "%[var(res.key),sdbm]"
+ http-response set-header x-sdbm-1 "%[var(res.key),sdbm(1)]"
+
+ http-response set-header x-wt6 "%[var(res.key),wt6]"
+ http-response set-header x-wt6-1 "%[var(res.key),wt6(1)]"
+
+ # 32/64-bit hashes, with seed variant
+ http-response set-header x-xxh32 "%[var(res.key),xxh32]"
+ http-response set-header x-xxh32-1 "%[var(res.key),xxh32(1)]"
+ http-response set-header x-xxh64 "%[var(res.key),xxh64]"
+ http-response set-header x-xxh64-1 "%[var(res.key),xxh64(1)]"
+ default_backend be
+
+ backend be
+ server srv1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-len0 == "0"
+ expect resp.http.x-len1 == "255"
+ expect resp.http.x-hex == "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F707172737475767778797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9FA0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEFF0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF"
+ expect resp.http.x-b64 == "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn+AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6/wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/w=="
+
+ expect resp.http.x-sha1 == "4916D6BDB7F78E6803698CAB32D1586EA457DFC8"
+ #expect resp.http.x-sha2 == "40AFF2E9D2D8922E47AFD4648E6967497158785FBD1DA870E7110266BF944880"
+ #expect resp.http.x-sha2-224 == "88702E63237824C4EB0D0FCFE41469A462493E8BEB2A75BBE5981734"
+ #expect resp.http.x-sha2-256 == "40AFF2E9D2D8922E47AFD4648E6967497158785FBD1DA870E7110266BF944880"
+ #expect resp.http.x-sha2-384 == "FFDAEBFF65ED05CF400F0221C4CCFB4B2104FB6A51F87E40BE6C4309386BFDEC2892E9179B34632331A59592737DB5C5"
+ #expect resp.http.x-sha2-512 == "1E7B80BC8EDC552C8FEEB2780E111477E5BC70465FAC1A77B29B35980C3F0CE4A036A6C9462036824BD56801E62AF7E9FEBA5C22ED8A5AF877BF7DE117DCAC6D"
+ expect resp.http.x-crc32 == "688229491"
+ expect resp.http.x-crc32-1 == "4230317029"
+ expect resp.http.x-crc32c == "2621708363"
+ expect resp.http.x-crc32c-1 == "2242979626"
+ expect resp.http.x-djb2 == "2589693061"
+ expect resp.http.x-djb2-1 == "600622701"
+ expect resp.http.x-sdbm == "905707648"
+ expect resp.http.x-sdbm-1 == "3103804144"
+ expect resp.http.x-wt6 == "4090277559"
+ expect resp.http.x-wt6-1 == "1192658767"
+ expect resp.http.x-xxh32 == "1497633363"
+ expect resp.http.x-xxh32-1 == "1070421674"
+ expect resp.http.x-xxh64 == "2282408585429094475"
+ expect resp.http.x-xxh64-1 == "-4689339368900765961"
+} -run

+ 156
- 0
net/haproxy/patches/021-BUG-MEDIUM-cli-_getsocks-must-send-the-peers-sockets.patch

@@ -0,0 +1,156 @@
commit 53f802b06a8c165c39cb1b9a3455366e1293d1ed
Author: William Lallemand <wlallemand@haproxy.org>
Date: Thu Jan 16 15:32:08 2020 +0100
BUG/MEDIUM: cli: _getsocks must send the peers sockets
This bug prevents HAProxy from being reloaded when you have both the
seamless reload (-x / expose-fd listeners) and peers.
Indeed, the _getsocks command does not send the FDs of the peers
listeners, so if no reuseport is possible during the bind, the new
process will fail to bind and exit.
With this feature, it is not possible to fall back on the SIGTTOU method
if we didn't receive all the sockets, because you can't close() the
sockets of the new process without closing those of the previous
process: they are the same.
Should fix bug #443.
Must be backported as far as 1.8.
(cherry picked from commit 5fd3b28c9c071376a9bffb427b25872ffc068601)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
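For context, a minimal configuration sketch of the setup this fix targets (socket path, peer name and addresses are hypothetical, not part of the upstream patch): a stats socket with "expose-fd listeners" used for seamless reload, combined with a peers section whose listener FD was previously not transferred by _getsocks:

    global
        stats socket /var/run/haproxy.sock mode 600 level admin expose-fd listeners

    peers mypeers
        peer hap1 192.0.2.10:10000

    backend st
        stick-table type ip size 1m expire 30m store conn_cur peers mypeers

    # seamless reload, fetching the old process' sockets over the stats socket:
    # haproxy -W -f /etc/haproxy/haproxy.cfg -x /var/run/haproxy.sock -sf $(cat /var/run/haproxy.pid)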
diff --git a/src/cli.c b/src/cli.c
index d68e2b299..2dca17552 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -1612,6 +1612,7 @@ static int _getsocks(char **args, char *payload, struct appctx *appctx, void *pr
int *tmpfd;
int tot_fd_nb = 0;
struct proxy *px;
+ struct peers *prs;
int i = 0;
int fd = -1;
int curoff = 0;
@@ -1664,6 +1665,22 @@ static int _getsocks(char **args, char *payload, struct appctx *appctx, void *pr
}
px = px->next;
}
+ prs = cfg_peers;
+ while (prs) {
+ if (prs->peers_fe) {
+ struct listener *l;
+
+ list_for_each_entry(l, &prs->peers_fe->conf.listeners, by_fe) {
+ /* Only transfer IPv4/IPv6/UNIX sockets */
+ if (l->state >= LI_ZOMBIE &&
+ (l->proto->sock_family == AF_INET ||
+ l->proto->sock_family == AF_INET6 ||
+ l->proto->sock_family == AF_UNIX))
+ tot_fd_nb++;
+ }
+ }
+ prs = prs->next;
+ }
if (tot_fd_nb == 0)
goto out;
@@ -1687,7 +1704,6 @@ static int _getsocks(char **args, char *payload, struct appctx *appctx, void *pr
cmsg->cmsg_type = SCM_RIGHTS;
tmpfd = (int *)CMSG_DATA(cmsg);
- px = proxies_list;
/* For each socket, e message is sent, containing the following :
* Size of the namespace name (or 0 if none), as an unsigned char.
* The namespace name, if any
@@ -1704,6 +1720,7 @@ static int _getsocks(char **args, char *payload, struct appctx *appctx, void *pr
goto out;
}
iov.iov_base = tmpbuf;
+ px = proxies_list;
while (px) {
struct listener *l;
@@ -1737,7 +1754,6 @@ static int _getsocks(char **args, char *payload, struct appctx *appctx, void *pr
sizeof(l->options));
curoff += sizeof(l->options);
-
i++;
} else
continue;
@@ -1758,10 +1774,70 @@ static int _getsocks(char **args, char *payload, struct appctx *appctx, void *pr
}
curoff = 0;
}
-
}
px = px->next;
}
+ /* should be done for peers too */
+ prs = cfg_peers;
+ while (prs) {
+ if (prs->peers_fe) {
+ struct listener *l;
+
+ list_for_each_entry(l, &prs->peers_fe->conf.listeners, by_fe) {
+ int ret;
+ /* Only transfer IPv4/IPv6 sockets */
+ if (l->state >= LI_ZOMBIE &&
+ (l->proto->sock_family == AF_INET ||
+ l->proto->sock_family == AF_INET6 ||
+ l->proto->sock_family == AF_UNIX)) {
+ memcpy(&tmpfd[i % MAX_SEND_FD], &l->fd, sizeof(l->fd));
+ if (!l->netns)
+ tmpbuf[curoff++] = 0;
+#ifdef USE_NS
+ else {
+ char *name = l->netns->node.key;
+ unsigned char len = l->netns->name_len;
+ tmpbuf[curoff++] = len;
+ memcpy(tmpbuf + curoff, name, len);
+ curoff += len;
+ }
+#endif
+ if (l->interface) {
+ unsigned char len = strlen(l->interface);
+ tmpbuf[curoff++] = len;
+ memcpy(tmpbuf + curoff, l->interface, len);
+ curoff += len;
+ } else
+ tmpbuf[curoff++] = 0;
+ memcpy(tmpbuf + curoff, &l->options,
+ sizeof(l->options));
+ curoff += sizeof(l->options);
+
+ i++;
+ } else
+ continue;
+ if ((!(i % MAX_SEND_FD))) {
+ iov.iov_len = curoff;
+ if (sendmsg(fd, &msghdr, 0) != curoff) {
+ ha_warning("Failed to transfer sockets\n");
+ goto out;
+ }
+ /* Wait for an ack */
+ do {
+ ret = recv(fd, &tot_fd_nb,
+ sizeof(tot_fd_nb), 0);
+ } while (ret == -1 && errno == EINTR);
+ if (ret <= 0) {
+ ha_warning("Unexpected error while transferring sockets\n");
+ goto out;
+ }
+ curoff = 0;
+ }
+ }
+ }
+ prs = prs->next;
+ }
+
if (i % MAX_SEND_FD) {
iov.iov_len = curoff;
cmsg->cmsg_len = CMSG_LEN((i % MAX_SEND_FD) * sizeof(int));

+ 70
- 0
net/haproxy/patches/022-BUG-MINOR-stream-dont-mistake-match-rules-for-store-request-rules.patch

@@ -0,0 +1,70 @@
commit 791df6f59a268e432ef7bc675084acaa6f1a2ed8
Author: Jerome Magnin <jmagnin@haproxy.com>
Date: Thu Jan 16 17:37:21 2020 +0100
BUG/MINOR: stream: don't mistake match rules for store-request rules
In process_sticking_rules() we only want to apply the first store-request
rule for a given table, but when doing so we need to make sure we only
count actual store-request rules when we list the sticking rules.
Failure to do so leads to not being able to write store-request and match
sticking rules in any order as a match rule after a store-request rule
will be ignored.
The following configuration reproduces the issue:
global
stats socket /tmp/foobar
defaults
mode http
frontend in
bind *:8080
default_backend bar
backend bar
server s1 127.0.0.1:21212
server s2 127.0.0.1:21211
stick store-request req.hdr(foo)
stick match req.hdr(foo)
stick-table type string size 10
listen foo
bind *:21212
bind *:21211
http-request deny deny_status 200 if { dst_port 21212 }
http-request deny
This patch fixes issue #448 and should be backported as far as 1.6.
(cherry picked from commit bee00ad080ff9359df8a670e891a6c2bce4acc39)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/stream.c b/src/stream.c
index 4efc16bd7..2dd7141aa 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -1786,13 +1786,15 @@ static int process_sticking_rules(struct stream *s, struct channel *req, int an_
* An example could be a store of the IP address from an HTTP
* header first, then from the source if not found.
*/
- for (i = 0; i < s->store_count; i++) {
- if (rule->table.t == s->store[i].table)
- break;
- }
+ if (rule->flags & STK_IS_STORE) {
+ for (i = 0; i < s->store_count; i++) {
+ if (rule->table.t == s->store[i].table)
+ break;
+ }
- if (i != s->store_count)
- continue;
+ if (i != s->store_count)
+ continue;
+ }
if (rule->cond) {
ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);

+ 145
- 0
net/haproxy/patches/023-BUG-MEDIUM-connection-add-a-mux-flag-to-indicate-splice-usability.patch

@@ -0,0 +1,145 @@
commit 7195d4b9396687e67da196cb92ef25b4bd6938d8
Author: Willy Tarreau <w@1wt.eu>
Date: Fri Jan 17 16:19:34 2020 +0100
BUG/MEDIUM: connection: add a mux flag to indicate splice usability
Commit c640ef1a7d ("BUG/MINOR: stream-int: avoid calling rcv_buf() when
splicing is still possible") fixed splicing in TCP and legacy mode but
broke it badly in HTX mode.
What happens in HTX mode is that the channel's to_forward value remains
set to CHN_INFINITE_FORWARD during the whole transfer, and as such it is
not a reliable signal anymore to indicate whether more data are expected
or not. Thus, when data are spliced out of the mux using rcv_pipe(), even
when the end is reached (that only the mux knows about), the call to
rcv_buf() to get the final HTX blocks completing the message were skipped
and there was often no new event to wake this up, resulting in transfer
timeouts at the end of large objects.
All this goes down to the fact that the channel has no more information
about whether it can splice or not despite being the one having to take
the decision to call rcv_pipe() or not. And we cannot afford to call
rcv_buf() unconditionally because, as the commit above showed, this
reduces the forwarding performance by 2 to 3 in TCP and legacy modes
due to data lying in the buffer preventing splicing from being used
later.
The approach taken by this patch consists in offering the muxes the ability
to report a bit more information to the upper layers via the conn_stream.
This information could simply be to indicate that more data are awaited
but the real need being to distinguish splicing and receiving, here
instead we clearly report the mux's willingness to be called for splicing
or not. Hence the flag's name, CS_FL_MAY_SPLICE.
The mux sets this flag when it knows that its buffer is empty and that
data waiting past what is currently known may be spliced, and clears it
when it knows there's no more data or that the caller must fall back to
rcv_buf() instead.
The stream-int code now uses this to determine if splicing may be used
or not instead of looking at the rcv_pipe() callbacks through the whole
chain. And after the rcv_pipe() call, it checks the flag again to decide
whether it may safely skip rcv_buf() or not.
All this bitfield dance remains a bit complex and it starts to appear
obvious that splicing vs reading should be a decision of the mux based
on permission granted by the data layer. This would however increase
the API's complexity but definitely needs to be thought about, and should
even significantly simplify the data processing layer.
The way it was integrated in mux-h1 will also result in no more calls
to rcv_pipe() on chunked encoded data, since these ones are currently
disabled at the mux level. However once the issue with chunks+splice
is fixed, it will be important to explicitly check for curr_len|CHNK
to set MAY_SPLICE, so that we don't call rcv_buf() after each chunk.
This fix must be backported to 2.1 and 2.0.
(cherry picked from commit 17ccd1a3560a634a17d276833ff41b8063b72206)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
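As a reminder of the user-facing side (a hypothetical minimal sketch, not part of the patch): kernel splicing is requested per proxy with the "option splice-*" directives and may be disabled globally with "nosplice", which is what the GTUNE_USE_SPLICE and CF_KERN_SPLICING checks in the hunks below reflect:

    defaults
        mode http
        timeout connect 5s
        timeout client  30s
        timeout server  30s
        option splice-auto        # let haproxy use splice() when it estimates it helps
        option splice-response    # always try to splice server responses
        # a global "nosplice" (or -dS on the command line) disables splicing entirely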
diff --git a/include/types/connection.h b/include/types/connection.h
index 165a683ae..f2aa63c33 100644
--- a/include/types/connection.h
+++ b/include/types/connection.h
@@ -95,7 +95,7 @@ enum {
CS_FL_EOS = 0x00001000, /* End of stream delivered to data layer */
/* unused: 0x00002000 */
CS_FL_EOI = 0x00004000, /* end-of-input reached */
- /* unused: 0x00008000 */
+ CS_FL_MAY_SPLICE = 0x00008000, /* caller may use rcv_pipe() only if this flag is set */
CS_FL_WAIT_FOR_HS = 0x00010000, /* This stream is waiting for handhskae */
CS_FL_KILL_CONN = 0x00020000, /* must kill the connection when the CS closes */
diff --git a/src/mux_h1.c b/src/mux_h1.c
index d93a7eab5..b76a58fe4 100644
--- a/src/mux_h1.c
+++ b/src/mux_h1.c
@@ -489,6 +489,9 @@ static struct conn_stream *h1s_new_cs(struct h1s *h1s)
if (h1s->flags & H1S_F_NOT_FIRST)
cs->flags |= CS_FL_NOT_FIRST;
+ if (global.tune.options & GTUNE_USE_SPLICE)
+ cs->flags |= CS_FL_MAY_SPLICE;
+
if (stream_create_from_cs(cs) < 0) {
TRACE_DEVEL("leaving on stream creation failure", H1_EV_STRM_NEW|H1_EV_STRM_END|H1_EV_STRM_ERR, h1s->h1c->conn, h1s);
goto err;
@@ -1275,6 +1278,11 @@ static size_t h1_process_data(struct h1s *h1s, struct h1m *h1m, struct htx **htx
goto end;
}
+ if (h1m->state == H1_MSG_DATA && h1m->curr_len && h1s->cs)
+ h1s->cs->flags |= CS_FL_MAY_SPLICE;
+ else if (h1s->cs)
+ h1s->cs->flags &= ~CS_FL_MAY_SPLICE;
+
*ofs += ret;
end:
@@ -2725,6 +2733,9 @@ static int h1_rcv_pipe(struct conn_stream *cs, struct pipe *pipe, unsigned int c
TRACE_STATE("read0 on connection", H1_EV_STRM_RECV, cs->conn, h1s);
}
+ if (h1m->state != H1_MSG_DATA || !h1m->curr_len)
+ cs->flags &= ~CS_FL_MAY_SPLICE;
+
TRACE_LEAVE(H1_EV_STRM_RECV, cs->conn, h1s);
return ret;
}
diff --git a/src/mux_pt.c b/src/mux_pt.c
index 6cbc689ce..2ac7d4715 100644
--- a/src/mux_pt.c
+++ b/src/mux_pt.c
@@ -111,6 +111,8 @@ static int mux_pt_init(struct connection *conn, struct proxy *prx, struct sessio
conn->ctx = ctx;
ctx->cs = cs;
cs->flags |= CS_FL_RCV_MORE;
+ if (global.tune.options & GTUNE_USE_SPLICE)
+ cs->flags |= CS_FL_MAY_SPLICE;
return 0;
fail_free:
diff --git a/src/stream_interface.c b/src/stream_interface.c
index 012ac71e0..a2ea7d779 100644
--- a/src/stream_interface.c
+++ b/src/stream_interface.c
@@ -1268,7 +1268,7 @@ int si_cs_recv(struct conn_stream *cs)
/* First, let's see if we may splice data across the channel without
* using a buffer.
*/
- if (conn->xprt->rcv_pipe && conn->mux->rcv_pipe &&
+ if (cs->flags & CS_FL_MAY_SPLICE &&
(ic->pipe || ic->to_forward >= MIN_SPLICE_FORWARD) &&
ic->flags & CF_KERN_SPLICING) {
if (c_data(ic)) {
@@ -1327,7 +1327,7 @@ int si_cs_recv(struct conn_stream *cs)
ic->pipe = NULL;
}
- if (ic->pipe && ic->to_forward && !(flags & CO_RFL_BUF_FLUSH)) {
+ if (ic->pipe && ic->to_forward && !(flags & CO_RFL_BUF_FLUSH) && cs->flags & CS_FL_MAY_SPLICE) {
/* don't break splicing by reading, but still call rcv_buf()
* to pass the flag.
*/

+ 44
- 0
net/haproxy/patches/024-BUG-MINOR-pattern-handle-errors-from-fgets-when-trying-to-load-patterns.patch

@@ -0,0 +1,44 @@
commit bfa549da979e13f6c6a2e2defb7bbda5efa908f5
Author: Jerome Magnin <jmagnin@haproxy.com>
Date: Fri Jan 17 16:09:33 2020 +0100
BUG/MINOR: pattern: handle errors from fgets when trying to load patterns
We need to do some error handling after we call fgets to make sure everything
went fine. If we don't, users can be fooled into thinking they can load patterns
from a directory because cfgparse doesn't flinch. This applies to acl patterns and
map files.
This should be backported to all supported versions.
(cherry picked from commit 3c79d4bdc47e151a97d7acdd99382bd9ca3927a5)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/pattern.c b/src/pattern.c
index ec7e9556a..33e0e17f2 100644
--- a/src/pattern.c
+++ b/src/pattern.c
@@ -2328,6 +2328,11 @@ int pat_ref_read_from_file_smp(struct pat_ref *ref, const char *filename, char *
}
}
+ if (ferror(file)) {
+ memprintf(err, "error encountered while reading <%s> : %s",
+ filename, strerror(errno));
+ goto out_close;
+ }
/* succes */
ret = 1;
@@ -2385,6 +2390,11 @@ int pat_ref_read_from_file(struct pat_ref *ref, const char *filename, char **err
}
}
+ if (ferror(file)) {
+ memprintf(err, "error encountered while reading <%s> : %s",
+ filename, strerror(errno));
+ goto out_close;
+ }
ret = 1; /* success */
out_close:

+ 26
- 0
net/haproxy/patches/025-BUG-MINOR-cache-Fix-leak-of-cache-name-in-error-path.patch

@@ -0,0 +1,26 @@
commit 09582bac29264997d71fcfb897d045c2dcac72f6
Author: Tim Duesterhus <tim@bastelstu.be>
Date: Sat Jan 18 01:46:18 2020 +0100
BUG/MINOR: cache: Fix leak of cache name in error path
This issue was introduced in commit 99a17a2d91f9044ea20bba6617048488aed80555
which first appeared in tag v1.9-dev11. This bugfix should be backported
to HAProxy 1.9+.
(cherry picked from commit d34b1ce5a20ce8f62b234f9696a621aaebe694c1)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/cache.c b/src/cache.c
index 8e2acd1cb..dc11cf532 100644
--- a/src/cache.c
+++ b/src/cache.c
@@ -1452,7 +1452,7 @@ parse_cache_flt(char **args, int *cur_arg, struct proxy *px,
cconf = NULL;
memprintf(err, "%s: multiple explicit declarations of the cache filter '%s'",
px->id, name);
- return -1;
+ goto error;
}
/* Remove the implicit filter. <cconf> is kept for the explicit one */

+ 32
- 0
net/haproxy/patches/026-BUG-MINOR-dns-Make-dns_query_id_seed-unsigned.patch

@@ -0,0 +1,32 @@
commit bf61c6cd41f59e68221eda04e0e4a10d9fafab48
Author: Tim Duesterhus <tim@bastelstu.be>
Date: Sat Jan 18 02:04:12 2020 +0100
BUG/MINOR: dns: Make dns_query_id_seed unsigned
Left shifting of large signed values and negative values is undefined.
In a test script clang's ubsan rightfully complains:
> runtime error: left shift of 1934242336581872173 by 13 places cannot be represented in type 'int64_t' (aka 'long')
This bug was introduced in the initial version of the DNS resolver
in 325137d603aa81bd24cbd8c99d816dd42291daa7. The fix must be backported
to HAProxy 1.6+.
(cherry picked from commit fcac33d0c1138ef22914c3b36518c1df105c9b72)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/dns.c b/src/dns.c
index 8ea6fb271..a7e43dfe3 100644
--- a/src/dns.c
+++ b/src/dns.c
@@ -54,7 +54,7 @@
struct list dns_resolvers = LIST_HEAD_INIT(dns_resolvers);
struct list dns_srvrq_list = LIST_HEAD_INIT(dns_srvrq_list);
-static THREAD_LOCAL int64_t dns_query_id_seed = 0; /* random seed */
+static THREAD_LOCAL uint64_t dns_query_id_seed = 0; /* random seed */
DECLARE_STATIC_POOL(dns_answer_item_pool, "dns_answer_item", sizeof(struct dns_answer_item));
DECLARE_STATIC_POOL(dns_resolution_pool, "dns_resolution", sizeof(struct dns_resolution));

+ 92
- 0
net/haproxy/patches/027-BUG-MINOR-51d-Fix-bug-when-HTX-is-enabled.patch

@@ -0,0 +1,92 @@
commit 84a6e9e474e3435849b4341a066079b7b93cd8e9
Author: Ben51Degrees <Ben@51Degrees.com>
Date: Mon Jan 20 11:25:11 2020 +0000
BUG/MINOR: 51d: Fix bug when HTX is enabled
When HTX is enabled, the sample flags were set too early. When matching for
multiple HTTP headers, the sample is fetched more than once, meaning that the
flags would need to be set again. Instead, the flags are now set last (just
before the outermost function returns). This could be further improved by
passing around the message without calling prefetch again.
This patch must be backported as far as 1.9. It should fix bug #450.
(cherry picked from commit 6bf06727116eb48825cf4c4b65970b8305591925)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/51d.c b/src/51d.c
index 96bbb6639..b00f01844 100644
--- a/src/51d.c
+++ b/src/51d.c
@@ -395,6 +395,21 @@ static void _51d_process_match(const struct arg *args, struct sample *smp, fifty
smp->data.u.str.data = temp->data;
}
+/* Sets the sample data as a constant string. This ensures that the
+ * string will be processed correctly.
+ */
+static void _51d_set_smp(struct sample *smp)
+{
+ /*
+ * Data type has to be set to ensure the string output is processed
+ * correctly.
+ */
+ smp->data.type = SMP_T_STR;
+
+ /* Flags the sample to show it uses constant memory. */
+ smp->flags |= SMP_F_CONST;
+}
+
static int _51d_fetch(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
@@ -413,14 +428,6 @@ static int _51d_fetch(const struct arg *args, struct sample *smp, const char *kw
if (!htx)
return 0;
- /*
- * Data type has to be reset to ensure the string output is processed
- * correctly.
- */
- smp->data.type = SMP_T_STR;
-
- /* Flags the sample to show it uses constant memory*/
- smp->flags |= SMP_F_CONST;
#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
@@ -448,6 +455,8 @@ static int _51d_fetch(const struct arg *args, struct sample *smp, const char *kw
fiftyoneDegreesWorksetPoolRelease(global_51degrees.pool, ws);
_51d_retrieve_cache_entry(smp, lru);
HA_SPIN_UNLOCK(OTHER_LOCK, &_51d_lru_lock);
+
+ _51d_set_smp(smp);
return 1;
}
HA_SPIN_UNLOCK(OTHER_LOCK, &_51d_lru_lock);
@@ -485,6 +494,7 @@ static int _51d_fetch(const struct arg *args, struct sample *smp, const char *kw
_51d_insert_cache_entry(smp, lru, (void*)args);
#endif
+ _51d_set_smp(smp);
return 1;
}
@@ -497,8 +507,6 @@ static int _51d_conv(const struct arg *args, struct sample *smp, void *private)
#ifdef FIFTYONEDEGREES_H_TRIE_INCLUDED
fiftyoneDegreesDeviceOffsets *offsets; /* Offsets for detection */
#endif
- /* Flags the sample to show it uses constant memory*/
- smp->flags |= SMP_F_CONST;
#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
@@ -560,6 +568,7 @@ static int _51d_conv(const struct arg *args, struct sample *smp, void *private)
#endif
#endif
+ _51d_set_smp(smp);
return 1;
}

+ 28
- 0
net/haproxy/patches/028-BUILD-pattern-include-errno-h.patch

@@ -0,0 +1,28 @@
commit fb815462c6720c63d45e8fc09c35c49de6160888
Author: Jerome Magnin <jmagnin@haproxy.com>
Date: Fri Jan 17 18:01:20 2020 +0100
BUILD: pattern: include errno.h
Commit 3c79d4bdc introduced the use of errno in pattern.c without
including errno.h.
If we build haproxy without any option errno is not defined and the
build fails.
(cherry picked from commit b8bd6d7efd6db5d964eae902e8f3c09a757b12a9)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
[Cf: I missed this one during my last backports]
diff --git a/src/pattern.c b/src/pattern.c
index 33e0e17f2..90067cd23 100644
--- a/src/pattern.c
+++ b/src/pattern.c
@@ -12,6 +12,7 @@
#include <ctype.h>
#include <stdio.h>
+#include <errno.h>
#include <common/config.h>
#include <common/standard.h>

+ 202
- 0
net/haproxy/patches/029-BUG-MINOR-http-ana-filters-Wait-end-of-the-http_end-callback-for-all-filters.patch

@@ -0,0 +1,202 @@
commit 8ece0801d813d6f821dabde13f7a74759dd95ee4
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Fri Nov 15 16:31:46 2019 +0100
BUG/MINOR: http-ana/filters: Wait end of the http_end callback for all filters
Filters may define the "http_end" callback, called at the end of the analysis of
any HTTP messages. It is called at the end of the payload forwarding and it can
interrupt the stream processing. So we must be sure to not remove the XFER_BODY
analyzers while there is still at least one filter in progress on this callback.
Unfortunately, once the request and the response are both in the DONE or the
TUNNEL mode, we consider the XFER_BODY analyzer has finished its processing on
both sides. So it is possible to prematurely interrupt the execution of the
filters "http_end" callback.
To fix this bug, we switch a message to the ENDING state. It is then switched to
DONE/TUNNEL mode only after the execution of the filters' "http_end" callback.
This patch must be backported (and adapted) to 2.1, 2.0 and 1.9. The legacy HTTP
mode should probably be fixed too.
(cherry picked from commit 1a3e0279c6079174288e2e3fbbf09e530ff221c5)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/http_ana.c b/src/http_ana.c
index 268796d2e..047ed813a 100644
--- a/src/http_ana.c
+++ b/src/http_ana.c
@@ -1166,11 +1166,8 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
if (req->to_forward) {
if (req->to_forward == CHN_INFINITE_FORWARD) {
- if (req->flags & CF_EOI) {
- msg->msg_state = HTTP_MSG_DONE;
- req->to_forward = 0;
- goto done;
- }
+ if (req->flags & CF_EOI)
+ msg->msg_state = HTTP_MSG_ENDING;
}
else {
/* We can't process the buffer's contents yet */
@@ -1179,8 +1176,14 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
}
}
- if (msg->msg_state >= HTTP_MSG_DONE)
- goto done;
+ if (msg->msg_state >= HTTP_MSG_ENDING)
+ goto ending;
+
+ if (txn->meth == HTTP_METH_CONNECT) {
+ msg->msg_state = HTTP_MSG_ENDING;
+ goto ending;
+ }
+
/* Forward input data. We get it by removing all outgoing data not
* forwarded yet from HTX data size. If there are some data filters, we
* let them decide the amount of data to forward.
@@ -1197,11 +1200,8 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
channel_htx_forward_forever(req, htx);
}
- if (txn->meth == HTTP_METH_CONNECT) {
- msg->msg_state = HTTP_MSG_TUNNEL;
- goto done;
- }
-
+ if (htx->data != co_data(req))
+ goto missing_data_or_waiting;
/* Check if the end-of-message is reached and if so, switch the message
* in HTTP_MSG_ENDING state. Then if all data was marked to be
@@ -1211,16 +1211,11 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
goto missing_data_or_waiting;
msg->msg_state = HTTP_MSG_ENDING;
- if (htx->data != co_data(req))
- goto missing_data_or_waiting;
- msg->msg_state = HTTP_MSG_DONE;
- req->to_forward = 0;
- done:
- /* other states, DONE...TUNNEL */
- /* we don't want to forward closes on DONE except in tunnel mode. */
- if (!(txn->flags & TX_CON_WANT_TUN))
- channel_dont_close(req);
+ ending:
+ /* other states, ENDING...TUNNEL */
+ if (msg->msg_state >= HTTP_MSG_DONE)
+ goto done;
if (HAS_REQ_DATA_FILTERS(s)) {
ret = flt_http_end(s, msg);
@@ -1231,6 +1226,18 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
}
}
+ if (txn->meth == HTTP_METH_CONNECT)
+ msg->msg_state = HTTP_MSG_TUNNEL;
+ else {
+ msg->msg_state = HTTP_MSG_DONE;
+ req->to_forward = 0;
+ }
+
+ done:
+ /* we don't want to forward closes on DONE except in tunnel mode. */
+ if (!(txn->flags & TX_CON_WANT_TUN))
+ channel_dont_close(req);
+
http_end_request(s);
if (!(req->analysers & an_bit)) {
http_end_response(s);
@@ -2179,11 +2186,8 @@ int http_response_forward_body(struct stream *s, struct channel *res, int an_bit
if (res->to_forward) {
if (res->to_forward == CHN_INFINITE_FORWARD) {
- if (res->flags & CF_EOI) {
- msg->msg_state = HTTP_MSG_DONE;
- res->to_forward = 0;
- goto done;
- }
+ if (res->flags & CF_EOI)
+ msg->msg_state = HTTP_MSG_ENDING;
}
else {
/* We can't process the buffer's contents yet */
@@ -2192,8 +2196,14 @@ int http_response_forward_body(struct stream *s, struct channel *res, int an_bit
}
}
- if (msg->msg_state >= HTTP_MSG_DONE)
- goto done;
+ if (msg->msg_state >= HTTP_MSG_ENDING)
+ goto ending;
+
+ if ((txn->meth == HTTP_METH_CONNECT && txn->status == 200) || txn->status == 101 ||
+ (!(msg->flags & HTTP_MSGF_XFER_LEN) && !HAS_RSP_DATA_FILTERS(s))) {
+ msg->msg_state = HTTP_MSG_ENDING;
+ goto ending;
+ }
/* Forward input data. We get it by removing all outgoing data not
* forwarded yet from HTX data size. If there are some data filters, we
@@ -2211,10 +2221,12 @@ int http_response_forward_body(struct stream *s, struct channel *res, int an_bit
channel_htx_forward_forever(res, htx);
}
- if ((txn->meth == HTTP_METH_CONNECT && txn->status == 200) || txn->status == 101 ||
- (!(msg->flags & HTTP_MSGF_XFER_LEN) && (res->flags & CF_SHUTR || !HAS_RSP_DATA_FILTERS(s)))) {
- msg->msg_state = HTTP_MSG_TUNNEL;
- goto done;
+ if (htx->data != co_data(res))
+ goto missing_data_or_waiting;
+
+ if (!(msg->flags & HTTP_MSGF_XFER_LEN) && res->flags & CF_SHUTR) {
+ msg->msg_state = HTTP_MSG_ENDING;
+ goto ending;
}
/* Check if the end-of-message is reached and if so, switch the message
@@ -2225,14 +2237,11 @@ int http_response_forward_body(struct stream *s, struct channel *res, int an_bit
goto missing_data_or_waiting;
msg->msg_state = HTTP_MSG_ENDING;
- if (htx->data != co_data(res))
- goto missing_data_or_waiting;
- msg->msg_state = HTTP_MSG_DONE;
- res->to_forward = 0;
- done:
- /* other states, DONE...TUNNEL */
- channel_dont_close(res);
+ ending:
+ /* other states, ENDING...TUNNEL */
+ if (msg->msg_state >= HTTP_MSG_DONE)
+ goto done;
if (HAS_RSP_DATA_FILTERS(s)) {
ret = flt_http_end(s, msg);
@@ -2243,6 +2252,20 @@ int http_response_forward_body(struct stream *s, struct channel *res, int an_bit
}
}
+ if ((txn->meth == HTTP_METH_CONNECT && txn->status == 200) || txn->status == 101 ||
+ !(msg->flags & HTTP_MSGF_XFER_LEN)) {
+ msg->msg_state = HTTP_MSG_TUNNEL;
+ goto ending;
+ }
+ else {
+ msg->msg_state = HTTP_MSG_DONE;
+ res->to_forward = 0;
+ }
+
+ done:
+
+ channel_dont_close(res);
+
http_end_response(s);
if (!(res->analysers & an_bit)) {
http_end_request(s);

+ 133
- 0
net/haproxy/patches/030-BUG-MINOR-http-rules-Remove-buggy-deinit-functions-for-HTTP-rules.patch

@@ -0,0 +1,133 @@
commit ff9be052e36d427df467b4a9b2f0a9b79af481a4
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Tue Dec 17 11:25:46 2019 +0100
BUG/MINOR: http-rules: Remove buggy deinit functions for HTTP rules
Functions to deinitialize the HTTP rules are buggy. These functions do not
check the action name to release the right part in the arg union. Only a few
fields are released. For auth rules, the realm is released and there is no problem
here. But the regex <arg.hdr_add.re> is always unconditionally released. So it
is easy to make these functions crash. For instance, with the following rule
HAProxy crashes during the deinit :
http-request set-map(/path/to/map) %[src] %[req.hdr(X-Value)]
For now, these functions are simply removed and we rely on the deinit function
used for TCP rules (renamed as deinit_act_rules()). This patch fixes the
bug. But arguments used by actions are not released at all; this part will be
addressed later.
This patch must be backported to all stable versions.
(cherry picked from commit cb5501327c7ece8a9b5b07c9a839419e45d9ee4a)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/include/proto/http_rules.h b/include/proto/http_rules.h
index 5e03dd813..608ca5760 100644
--- a/include/proto/http_rules.h
+++ b/include/proto/http_rules.h
@@ -32,8 +32,6 @@ extern struct action_kw_list http_res_keywords;
struct act_rule *parse_http_req_cond(const char **args, const char *file, int linenum, struct proxy *proxy);
struct act_rule *parse_http_res_cond(const char **args, const char *file, int linenum, struct proxy *proxy);
-void free_http_req_rules(struct list *r);
-void free_http_res_rules(struct list *r);
struct redirect_rule *http_parse_redirect_rule(const char *file, int linenum, struct proxy *curproxy,
const char **args, char **errmsg, int use_fmt, int dir);
diff --git a/src/haproxy.c b/src/haproxy.c
index a66a184dc..f225a13f8 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -2300,14 +2300,14 @@ static void deinit_acl_cond(struct acl_cond *cond)
free(cond);
}
-static void deinit_tcp_rules(struct list *rules)
+static void deinit_act_rules(struct list *rules)
{
- struct act_rule *trule, *truleb;
+ struct act_rule *rule, *ruleb;
- list_for_each_entry_safe(trule, truleb, rules, list) {
- LIST_DEL(&trule->list);
- deinit_acl_cond(trule->cond);
- free(trule);
+ list_for_each_entry_safe(rule, ruleb, rules, list) {
+ LIST_DEL(&rule->list);
+ deinit_acl_cond(rule->cond);
+ free(rule);
}
}
@@ -2452,9 +2452,12 @@ void deinit(void)
free(lf);
}
- deinit_tcp_rules(&p->tcp_req.inspect_rules);
- deinit_tcp_rules(&p->tcp_rep.inspect_rules);
- deinit_tcp_rules(&p->tcp_req.l4_rules);
+ deinit_act_rules(&p->tcp_req.inspect_rules);
+ deinit_act_rules(&p->tcp_rep.inspect_rules);
+ deinit_act_rules(&p->tcp_req.l4_rules);
+ deinit_act_rules(&p->tcp_req.l5_rules);
+ deinit_act_rules(&p->http_req_rules);
+ deinit_act_rules(&p->http_res_rules);
deinit_stick_rules(&p->storersp_rules);
deinit_stick_rules(&p->sticking_rules);
@@ -2556,8 +2559,6 @@ void deinit(void)
free(p->desc);
free(p->fwdfor_hdr_name);
- free_http_req_rules(&p->http_req_rules);
- free_http_res_rules(&p->http_res_rules);
task_destroy(p->task);
pool_destroy(p->req_cap_pool);
@@ -2582,7 +2583,7 @@ void deinit(void)
free(uap->desc);
userlist_free(uap->userlist);
- free_http_req_rules(&uap->http_req_rules);
+ deinit_act_rules(&uap->http_req_rules);
free(uap);
}
diff --git a/src/http_rules.c b/src/http_rules.c
index b790c5ffe..aad771466 100644
--- a/src/http_rules.c
+++ b/src/http_rules.c
@@ -1186,31 +1186,6 @@ struct redirect_rule *http_parse_redirect_rule(const char *file, int linenum, st
return NULL;
}
-void free_http_res_rules(struct list *r)
-{
- struct act_rule *tr, *pr;
-
- list_for_each_entry_safe(pr, tr, r, list) {
- LIST_DEL(&pr->list);
- regex_free(pr->arg.hdr_add.re);
- free(pr);
- }
-}
-
-void free_http_req_rules(struct list *r)
-{
- struct act_rule *tr, *pr;
-
- list_for_each_entry_safe(pr, tr, r, list) {
- LIST_DEL(&pr->list);
- if (pr->action == ACT_HTTP_REQ_AUTH)
- free(pr->arg.auth.realm);
-
- regex_free(pr->arg.hdr_add.re);
- free(pr);
- }
-}
-
__attribute__((constructor))
static void __http_rules_init(void)
{

+ 56
- 0
net/haproxy/patches/031-BUG-MINOR-stick-table-Use-MAX_SESS_STKCTR-as-the-max-track-ID-during-parsing.patch

@@ -0,0 +1,56 @@
commit 1781e3834bb4a0b74d88d467bddc11e8fb811f17
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Wed Dec 18 10:25:46 2019 +0100
BUG/MINOR: stick-table: Use MAX_SESS_STKCTR as the max track ID during parsing
During the parsing of the sc-inc-gpc0, sc-inc-gpc1 and sc-set-gpt0 actions, the
maximum stick table track ID allowed is tested against ACT_ACTION_TRK_SCMAX. It
is the action number and not the maximum number of stick counters. Instead,
MAX_SESS_STKCTR must be used.
This patch must be backported to all stable versions.
(cherry picked from commit 28436e23d313d5986ddb97c9b4a5a0e5e78b2a42)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
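To illustrate what the track ID refers to (names, ports and conditions below are hypothetical, not part of the patch): the number given to sc-inc-gpc0/sc-inc-gpc1/sc-set-gpt0 selects one of the MAX_SESS_STKCTR session counters (3 by default), so only IDs 0..2 are accepted here:

    backend st
        stick-table type ip size 1m expire 10m store gpc0,gpt0

    frontend fe
        bind :8080
        # slot 2 is the highest valid ID with the default MAX_SESS_STKCTR of 3
        http-request track-sc2 src table st
        http-request sc-inc-gpc0(2) if { path_beg /api }
        http-request sc-set-gpt0(2) 1 if { path_beg /blocked }
        default_backend app

    backend app
        server s1 192.0.2.10:8080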
diff --git a/src/stick_table.c b/src/stick_table.c
index 1b70b468e..7b648475b 100644
--- a/src/stick_table.c
+++ b/src/stick_table.c
@@ -1916,9 +1916,9 @@ static enum act_parse_ret parse_inc_gpc0(const char **args, int *arg, struct pro
return ACT_RET_PRS_ERR;
}
- if (rule->arg.gpc.sc >= ACT_ACTION_TRK_SCMAX) {
+ if (rule->arg.gpc.sc >= MAX_SESS_STKCTR) {
memprintf(err, "invalid stick table track ID. The max allowed ID is %d",
- ACT_ACTION_TRK_SCMAX-1);
+ MAX_SESS_STKCTR-1);
return ACT_RET_PRS_ERR;
}
}
@@ -1998,9 +1998,9 @@ static enum act_parse_ret parse_inc_gpc1(const char **args, int *arg, struct pro
return ACT_RET_PRS_ERR;
}
- if (rule->arg.gpc.sc >= ACT_ACTION_TRK_SCMAX) {
+ if (rule->arg.gpc.sc >= MAX_SESS_STKCTR) {
memprintf(err, "invalid stick table track ID. The max allowed ID is %d",
- ACT_ACTION_TRK_SCMAX-1);
+ MAX_SESS_STKCTR-1);
return ACT_RET_PRS_ERR;
}
}
@@ -2107,9 +2107,9 @@ static enum act_parse_ret parse_set_gpt0(const char **args, int *arg, struct pro
return ACT_RET_PRS_ERR;
}
- if (rule->arg.gpt.sc >= ACT_ACTION_TRK_SCMAX) {
+ if (rule->arg.gpt.sc >= MAX_SESS_STKCTR) {
memprintf(err, "invalid stick table track ID '%s'. The max allowed ID is %d",
- args[*arg-1], ACT_ACTION_TRK_SCMAX-1);
+ args[*arg-1], MAX_SESS_STKCTR-1);
return ACT_RET_PRS_ERR;
}
}

+ 65
- 0
net/haproxy/patches/032-BUG-MINOR-tcp-rules-Fix-memory-releases-on-error-path-during-action-parsing.patch

@@ -0,0 +1,65 @@
commit 630583cc735de8036ca9963a6e093d5fef90157e
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Tue Jan 14 15:05:56 2020 +0100
BUG/MINOR: tcp-rules: Fix memory releases on error path during action parsing
When an error occurs during the parsing of a TCP action, if some memory was
allocated, it should be released before exiting. Here, the fix consists of
replacing a call to free() on a sample expression with a call to
release_sample_expr().
This patch may be backported to all supported versions.
(cherry picked from commit fdb6fbfa9a7b730939865b79bfbca3af278113b8)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/tcp_rules.c b/src/tcp_rules.c
index 86b4df538..27cc0c20b 100644
--- a/src/tcp_rules.c
+++ b/src/tcp_rules.c
@@ -694,7 +694,7 @@ static int tcp_parse_request_rule(char **args, int arg, int section_type,
memprintf(err,
"'%s %s %s' : fetch method '%s' extracts information from '%s', none of which is available here",
args[0], args[1], args[kw], args[arg-1], sample_src_names(expr->fetch->use));
- free(expr);
+ release_sample_expr(expr);
return -1;
}
@@ -704,7 +704,7 @@ static int tcp_parse_request_rule(char **args, int arg, int section_type,
memprintf(err,
"'%s %s %s' : missing length value",
args[0], args[1], args[kw]);
- free(expr);
+ release_sample_expr(expr);
return -1;
}
/* we copy the table name for now, it will be resolved later */
@@ -713,7 +713,7 @@ static int tcp_parse_request_rule(char **args, int arg, int section_type,
memprintf(err,
"'%s %s %s' : length must be > 0",
args[0], args[1], args[kw]);
- free(expr);
+ release_sample_expr(expr);
return -1;
}
arg++;
@@ -772,7 +772,7 @@ static int tcp_parse_request_rule(char **args, int arg, int section_type,
memprintf(err,
"'%s %s %s' : fetch method '%s' extracts information from '%s', none of which is available here",
args[0], args[1], args[kw], args[arg-1], sample_src_names(expr->fetch->use));
- free(expr);
+ release_sample_expr(expr);
return -1;
}
@@ -785,7 +785,7 @@ static int tcp_parse_request_rule(char **args, int arg, int section_type,
memprintf(err,
"'%s %s %s' : missing table name",
args[0], args[1], args[kw]);
- free(expr);
+ release_sample_expr(expr);
return -1;
}
/* we copy the table name for now, it will be resolved later */

+ 32
- 0
net/haproxy/patches/033-BUG-MINOR-ssl-ssl_sock_load_ocsp_response_from_file-memory-leak.patch

@@ -0,0 +1,32 @@
commit 058a746aed714504781c3955b8c5147433bf4020
Author: Emmanuel Hocdet <manu@gandi.net>
Date: Thu Jan 16 14:41:36 2020 +0100
BUG/MINOR: ssl: ssl_sock_load_ocsp_response_from_file memory leak
"set ssl cert <filename.ocsp> <payload>" CLI command must free
previous context.
This patch should be backport to 2.1
(cherry picked from commit 0667faebcf55562d86c30af63f36fe86ba58fff9)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index 1fac2d905..2c19fa5b3 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -945,7 +945,12 @@ static int ssl_sock_load_ocsp_response_from_file(const char *ocsp_path, char *bu
ocsp_response = NULL;
goto end;
}
-
+ /* no error, fill ckch with new context, old context must be free */
+ if (ckch->ocsp_response) {
+ free(ckch->ocsp_response->area);
+ ckch->ocsp_response->area = NULL;
+ free(ckch->ocsp_response);
+ }
ckch->ocsp_response = ocsp_response;
ret = 0;
end:

+ 31
- 0
net/haproxy/patches/034-BUG-MINOR-ssl-ssl_sock_load_issuer_file_into_ckch-memory-leak.patch

@@ -0,0 +1,31 @@
commit 414139aa263974b1a8513c50a822e44c4767c66f
Author: Emmanuel Hocdet <manu@gandi.net>
Date: Thu Jan 16 14:45:00 2020 +0100
BUG/MINOR: ssl: ssl_sock_load_issuer_file_into_ckch memory leak
"set ssl cert <filename.issuer> <payload>" CLI command must free
previous context.
This patch should be backport to 2.1
(cherry picked from commit eb73dc34bbfbb5ffe8d9f3eb9d07fe981c938d8f)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index 2c19fa5b3..1ec3a84bb 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -3076,8 +3076,11 @@ static int ssl_sock_load_issuer_file_into_ckch(const char *path, char *buf, stru
*err ? *err : "", path);
goto end;
}
- ret = 0;
+ /* no error, fill ckch with new context, old context must be free */
+ if (ckch->ocsp_issuer)
+ X509_free(ckch->ocsp_issuer);
ckch->ocsp_issuer = issuer;
+ ret = 0;
end:

+ 36
- 0
net/haproxy/patches/035-BUG-MINOR-ssl-ssl_sock_load_sctl_from_file-memory-leak.patch

@@ -0,0 +1,36 @@
commit 16a997d2b725eabc6ceec94f57cc25e005845e4d
Author: Emmanuel Hocdet <manu@gandi.net>
Date: Thu Jan 16 15:15:49 2020 +0100
BUG/MINOR: ssl: ssl_sock_load_sctl_from_file memory leak
"set ssl cert <filename.sctl> <payload>" CLI command must free
previous context.
This patch should be backport to 2.1
(cherry picked from commit 224a087a271b513b3f0a0f08ed23cde42919e0f6)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index 1ec3a84bb..4f1e7e78e 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -1445,10 +1445,14 @@ static int ssl_sock_load_sctl_from_file(const char *sctl_path, char *buf, struct
sctl = NULL;
goto end;
}
- ret = 0;
- /* TODO: free the previous SCTL in the ckch */
+ /* no error, fill ckch with new context, old context must be free */
+ if (ckch->sctl) {
+ free(ckch->sctl->area);
+ ckch->sctl->area = NULL;
+ free(ckch->sctl);
+ }
ckch->sctl = sctl;
-
+ ret = 0;
end:
if (fd != -1)
close(fd);

+ 140
- 0
net/haproxy/patches/036-MINOR-proxy-http-ana-Add-support-of-extra-attributes-for-the-cookie-directive.patch

@@ -0,0 +1,140 @@
commit fac50825151ac2abc6b71343e3ffa6e0dc06c53d
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Tue Jan 21 11:06:48 2020 +0100
MINOR: proxy/http-ana: Add support of extra attributes for the cookie directive
It is now possible to insert any attribute when a cookie is inserted by
HAProxy. Any value may be set; no check is performed except for syntax validity
(CTRL chars and ';' are forbidden). For instance, it may be used to add the
SameSite attribute:
cookie SRV insert attr "SameSite=Strict"
The attr option may be repeated to add several attributes.
This patch should fix the issue #361.
(cherry picked from commit 2f5339079b884ac8bdde166add1879ebfd9e433b)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
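A short configuration sketch (backend name, addresses and attribute values are illustrative only, not part of the patch) showing the new keyword, including the fact that it may be repeated:

    backend app
        mode http
        cookie SRV insert indirect nocache attr "SameSite=None" attr "Secure"
        server s1 192.0.2.10:8080 cookie s1
        server s2 192.0.2.11:8080 cookie s2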
diff --git a/doc/configuration.txt b/doc/configuration.txt
index 3f381e386..e0dc49880 100644
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -3255,7 +3255,7 @@ compression offload
cookie <name> [ rewrite | insert | prefix ] [ indirect ] [ nocache ]
[ postonly ] [ preserve ] [ httponly ] [ secure ]
[ domain <domain> ]* [ maxidle <idle> ] [ maxlife <life> ]
- [ dynamic ]
+ [ dynamic ] [ attr <value> ]*
Enable cookie-based persistence in a backend.
May be used in sections : defaults | frontend | listen | backend
yes | no | yes | yes
@@ -3414,6 +3414,11 @@ cookie <name> [ rewrite | insert | prefix ] [ indirect ] [ nocache ]
The cookie will be regenerated each time the IP address change,
and is only generated for IPv4/IPv6.
+ attr This option tells haproxy to add an extra attribute when a
+ cookie is inserted. The attribute value can contain any
+ characters except control ones or ";". This option may be
+ repeated.
+
There can be only one persistence cookie per HTTP backend, and it can be
declared in a defaults section. The value of the cookie will be the value
indicated after the "cookie" keyword in a "server" statement. If no cookie
diff --git a/include/types/proxy.h b/include/types/proxy.h
index 6ea96b3ad..3661c9a0c 100644
--- a/include/types/proxy.h
+++ b/include/types/proxy.h
@@ -338,6 +338,7 @@ struct proxy {
int cookie_len; /* strlen(cookie_name), computed only once */
char *cookie_domain; /* domain used to insert the cookie */
char *cookie_name; /* name of the cookie to look for */
+ char *cookie_attrs; /* list of attributes to add to the cookie */
char *dyncookie_key; /* Secret key used to generate dynamic persistent cookies */
unsigned int cookie_maxidle; /* max idle time for this cookie */
unsigned int cookie_maxlife; /* max life time for this cookie */
diff --git a/src/cfgparse-listen.c b/src/cfgparse-listen.c
index 9975e4687..b1f5c0790 100644
--- a/src/cfgparse-listen.c
+++ b/src/cfgparse-listen.c
@@ -323,6 +323,8 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
curproxy->rdp_cookie_name = strdup(defproxy.rdp_cookie_name);
curproxy->rdp_cookie_len = defproxy.rdp_cookie_len;
+ if (defproxy.cookie_attrs)
+ curproxy->cookie_attrs = strdup(defproxy.cookie_attrs);
if (defproxy.lbprm.arg_str)
curproxy->lbprm.arg_str = strdup(defproxy.lbprm.arg_str);
@@ -473,6 +475,7 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
free(defproxy.rdp_cookie_name);
free(defproxy.dyncookie_key);
free(defproxy.cookie_domain);
+ free(defproxy.cookie_attrs);
free(defproxy.lbprm.arg_str);
free(defproxy.capture_name);
free(defproxy.monitor_uri);
@@ -986,9 +989,34 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
err_code |= ERR_WARN;
curproxy->ck_opts |= PR_CK_DYNAMIC;
}
+ else if (!strcmp(args[cur_arg], "attr")) {
+ char *val;
+ if (!*args[cur_arg + 1]) {
+ ha_alert("parsing [%s:%d]: '%s' expects <value> as argument.\n",
+ file, linenum, args[cur_arg]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ val = args[cur_arg + 1];
+ while (*val) {
+ if (iscntrl(*val) || *val == ';') {
+ ha_alert("parsing [%s:%d]: character '%%x%02X' is not permitted in attribute value.\n",
+ file, linenum, *val);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ val++;
+ }
+ /* don't add ';' for the first attribute */
+ if (!curproxy->cookie_attrs)
+ curproxy->cookie_attrs = strdup(args[cur_arg + 1]);
+ else
+ memprintf(&curproxy->cookie_attrs, "%s; %s", curproxy->cookie_attrs, args[cur_arg + 1]);
+ cur_arg++;
+ }
else {
- ha_alert("parsing [%s:%d] : '%s' supports 'rewrite', 'insert', 'prefix', 'indirect', 'nocache', 'postonly', 'domain', 'maxidle', 'dynamic' and 'maxlife' options.\n",
+ ha_alert("parsing [%s:%d] : '%s' supports 'rewrite', 'insert', 'prefix', 'indirect', 'nocache', 'postonly', 'domain', 'maxidle', 'dynamic', 'maxlife' and 'attr' options.\n",
file, linenum, args[0]);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
diff --git a/src/haproxy.c b/src/haproxy.c
index f225a13f8..98d6a9a39 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -2352,6 +2352,7 @@ void deinit(void)
free(p->check_req);
free(p->cookie_name);
free(p->cookie_domain);
+ free(p->cookie_attrs);
free(p->lbprm.arg_str);
free(p->capture_name);
free(p->monitor_uri);
diff --git a/src/http_ana.c b/src/http_ana.c
index 047ed813a..cb5a60ca9 100644
--- a/src/http_ana.c
+++ b/src/http_ana.c
@@ -2027,6 +2027,9 @@ int http_process_res_common(struct stream *s, struct channel *rep, int an_bit, s
if (s->be->ck_opts & PR_CK_SECURE)
chunk_appendf(&trash, "; Secure");
+ if (s->be->cookie_attrs)
+ chunk_appendf(&trash, "; %s", s->be->cookie_attrs);
+
if (unlikely(!http_add_header(htx, ist("Set-Cookie"), ist2(trash.area, trash.data))))
goto return_bad_resp;

+ 89
- 0
net/haproxy/patches/037-BUG-MINOR-http_act-dont-check-capture-id-in-backend.patch

@@ -0,0 +1,89 @@
commit 3480d55e0406e47214291eb8292a037fdca2859f
Author: Baptiste Assmann <bedis9@gmail.com>
Date: Thu Jan 16 14:34:22 2020 +0100
BUG/MINOR: http_act: don't check capture id in backend
A wrong behavior was introduced by
e9544935e86278dfa3d49fb4b97b860774730625, which prevents loading
any configuration where a capture slot id is used in a backend.
I.e., the configuration below does not parse:
frontend f
bind *:80
declare capture request len 32
default_backend webserver
backend webserver
http-request capture req.hdr(Host) id 1
The point is that such a configuration is valid and should run.
This patch enforces the check of capture slot id only if the action rule
is configured in a frontend.
The point is that at configuration parsing time, it is impossible to
check which frontend could point to this backend (all the more so if we use
dynamic backend name resolution at runtime).
The documentation has been updated to warn the user to ensure that
relevant frontends have required declaration when such rule has to be
used in a backend.
If no capture slot can be found, then the action will just not be
executed and HAProxy will process the next one in the list, as expected.
This should be backported to all supported branches (bug created as part
of a bug fix introduced into 1.7 and backported to 1.6).
(cherry picked from commit 19a69b3740702ce5503a063e9dfbcea5b9187d27)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/doc/configuration.txt b/doc/configuration.txt
index e0dc49880..36291a339 100644
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -4278,9 +4278,12 @@ http-request capture <sample> [ len <length> | id <id> ]
If the keyword "id" is used instead of "len", the action tries to store the
captured string in a previously declared capture slot. This is useful to run
captures in backends. The slot id can be declared by a previous directive
- "http-request capture" or with the "declare capture" keyword. If the slot
- <id> doesn't exist, then HAProxy fails parsing the configuration to prevent
- unexpected behavior at run time.
+ "http-request capture" or with the "declare capture" keyword.
+
+ When using this action in a backend, double check that the relevant
+ frontend(s) have the required capture slots otherwise, this rule will be
+ ignored at run time. This can't be detected at configuration parsing time
+ due to HAProxy's ability to dynamically resolve backend name at runtime.
http-request del-acl(<file-name>) <key fmt> [ { if | unless } <condition> ]
@@ -4959,8 +4962,11 @@ http-response capture <sample> id <id> [ { if | unless } <condition> ]
This is useful to run captures in backends. The slot id can be declared by a
previous directive "http-response capture" or with the "declare capture"
keyword.
- If the slot <id> doesn't exist, then HAProxy fails parsing the configuration
- to prevent unexpected behavior at run time.
+
+ When using this action in a backend, double check that the relevant
+ frontend(s) have the required capture slots otherwise, this rule will be
+ ignored at run time. This can't be detected at configuration parsing time
+ due to HAProxy's ability to dynamically resolve backend name at runtime.
http-response del-acl(<file-name>) <key fmt> [ { if | unless } <condition> ]
diff --git a/src/http_act.c b/src/http_act.c
index c8d9220fe..8ff8e886d 100644
--- a/src/http_act.c
+++ b/src/http_act.c
@@ -424,7 +424,10 @@ static int check_http_req_capture(struct act_rule *rule, struct proxy *px, char
if (rule->action_ptr != http_action_req_capture_by_id)
return 1;
- if (rule->arg.capid.idx >= px->nb_req_cap) {
+ /* capture slots can only be declared in frontends, so we can't check their
+ * existence in backends at configuration parsing step
+ */
+ if (px->cap & PR_CAP_FE && rule->arg.capid.idx >= px->nb_req_cap) {
memprintf(err, "unable to find capture id '%d' referenced by http-request capture rule",
rule->arg.capid.idx);
return 0;

+ 30
- 0
net/haproxy/patches/038-BUG-MEDIUM-netscaler-Dont-forget-to-allocate-storage-for-conn--src-dst.patch

@@ -0,0 +1,30 @@
commit ad9954f2e723d37fed3a3a777fa6eecfa930fd11
Author: Olivier Houchard <ohouchard@haproxy.com>
Date: Wed Jan 22 15:31:09 2020 +0100
BUG/MEDIUM: netscaler: Don't forget to allocate storage for conn->src/dst.
In conn_recv_netscaler_cip(), don't forget to allocate conn->src and
conn->dst, as those are now dynamically allocated. Not doing so results in
a crash when using netscaler.
This should fix github issue #460.
This should be backported to 2.1.
(cherry picked from commit 1a9dbe58a66516e6acc504ed2f185fd9d86a5e6d)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/connection.c b/src/connection.c
index 7a2ab2499..b48049e5a 100644
--- a/src/connection.c
+++ b/src/connection.c
@@ -740,6 +740,9 @@ int conn_recv_netscaler_cip(struct connection *conn, int flag)
if (!conn_ctrl_ready(conn))
goto fail;
+ if (!sockaddr_alloc(&conn->src) || !sockaddr_alloc(&conn->dst))
+ goto fail;
+
if (!fd_recv_ready(conn->handle.fd))
goto not_ready;

+ 129
- 0
net/haproxy/patches/039-BUG-MINOR-ssl-ssl_sock_load_pem_into_ckch-is-not-consistent.patch

@@ -0,0 +1,129 @@
commit 04184b70503780533533f9ff15cf43af2c0eb820
Author: Emmanuel Hocdet <manu@gandi.net>
Date: Fri Dec 20 17:47:12 2019 +0100
BUG/MINOR: ssl: ssl_sock_load_pem_into_ckch is not consistent
"set ssl cert <filename> <payload>" CLI command should have the same
result as reload HAproxy with the updated pem file (<filename>).
Is not the case, DHparams/cert-chain is kept from the previous
context if no DHparams/cert-chain is set in the context (<payload>).
This patch should be backport to 2.1
(cherry picked from commit 6b5b44e10fa1c5da18a120fd78082317036900e2)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/include/common/tools.h b/include/common/tools.h
index 398383ad3..961060109 100644
--- a/include/common/tools.h
+++ b/include/common/tools.h
@@ -33,6 +33,8 @@
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
+#define SWAP(a, b) do { typeof(a) t; t = a; a = b; b = t; } while(0)
+
/* return an integer of type <ret> with only the highest bit set. <ret> may be
* both a variable or a type.
*/
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index 4f1e7e78e..b65da399f 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -3112,7 +3112,8 @@ static int ssl_sock_load_pem_into_ckch(const char *path, char *buf, struct cert_
X509 *ca;
X509 *cert = NULL;
EVP_PKEY *key = NULL;
- DH *dh;
+ DH *dh = NULL;
+ STACK_OF(X509) *chain = NULL;
if (buf) {
/* reading from a buffer */
@@ -3150,13 +3151,6 @@ static int ssl_sock_load_pem_into_ckch(const char *path, char *buf, struct cert_
dh = PEM_read_bio_DHparams(in, NULL, NULL, NULL);
/* no need to return an error there, dh is not mandatory */
-
- if (dh) {
- if (ckch->dh)
- DH_free(ckch->dh);
- ckch->dh = dh;
- }
-
#endif
/* Seek back to beginning of file */
@@ -3180,39 +3174,19 @@ static int ssl_sock_load_pem_into_ckch(const char *path, char *buf, struct cert_
goto end;
}
- /* Key and Cert are good, we can use them in the ckch */
- if (ckch->key) /* free the previous key */
- EVP_PKEY_free(ckch->key);
- ckch->key = key;
- key = NULL;
-
- if (ckch->cert) /* free the previous cert */
- X509_free(ckch->cert);
- ckch->cert = cert;
- cert = NULL;
-
/* Look for a Certificate Chain */
- ca = PEM_read_bio_X509(in, NULL, NULL, NULL);
- if (ca) {
- /* there is a chain a in the PEM, clean the previous one in the CKCH */
- if (ckch->chain) /* free the previous chain */
- sk_X509_pop_free(ckch->chain, X509_free);
- ckch->chain = sk_X509_new_null();
- if (!sk_X509_push(ckch->chain, ca)) {
+ while ((ca = PEM_read_bio_X509(in, NULL, NULL, NULL))) {
+ if (chain == NULL)
+ chain = sk_X509_new_null();
+ if (!sk_X509_push(chain, ca)) {
X509_free(ca);
goto end;
}
}
- /* look for other crt in the chain */
- while ((ca = PEM_read_bio_X509(in, NULL, NULL, NULL)))
- if (!sk_X509_push(ckch->chain, ca)) {
- X509_free(ca);
- goto end;
- }
/* no chain */
- if (ckch->chain == NULL) {
- ckch->chain = sk_X509_new_null();
+ if (chain == NULL) {
+ chain = sk_X509_new_null();
}
ret = ERR_get_error();
@@ -3222,6 +3196,12 @@ static int ssl_sock_load_pem_into_ckch(const char *path, char *buf, struct cert_
goto end;
}
+ /* no error, fill ckch with new context, old context will be free at end: */
+ SWAP(ckch->key, key);
+ SWAP(ckch->dh, dh);
+ SWAP(ckch->cert, cert);
+ SWAP(ckch->chain, chain);
+
ret = 0;
end:
@@ -3231,8 +3211,12 @@ end:
BIO_free(in);
if (key)
EVP_PKEY_free(key);
+ if (dh)
+ DH_free(dh);
if (cert)
X509_free(cert);
+ if (chain)
+ sk_X509_pop_free(chain, X509_free);
return ret;
}

+ 48
- 0
net/haproxy/patches/040-BUG-MINOR-ssl-cli-free-the-previous-ckch-content-once-a-PEM-is-loaded.patch

@@ -0,0 +1,48 @@
commit 9f77fd742697cc2774c6a50204cb9f5b6909e930
Author: William Lallemand <wlallemand@haproxy.com>
Date: Thu Jan 23 10:56:05 2020 +0100
BUG/MINOR: ssl/cli: free the previous ckch content once a PEM is loaded
When using "set ssl cert" on the CLI, if we load a new PEM, the previous
sctl, issuer and OCSP response are still loaded. This doesn't make any
sense since they won't be usable with a new private key.
This patch frees the previous data.
Should be backported to 2.1.
(cherry picked from commit 75b15f790f2be0600483476c1505fec0ce898e35)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index b65da399f..e320d908f 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -3196,6 +3196,26 @@ static int ssl_sock_load_pem_into_ckch(const char *path, char *buf, struct cert_
goto end;
}
+ /* once it loaded the PEM, it should remove everything else in the ckch */
+ if (ckch->ocsp_response) {
+ free(ckch->ocsp_response->area);
+ ckch->ocsp_response->area = NULL;
+ free(ckch->ocsp_response);
+ ckch->ocsp_response = NULL;
+ }
+
+ if (ckch->sctl) {
+ free(ckch->sctl->area);
+ ckch->sctl->area = NULL;
+ free(ckch->sctl);
+ ckch->sctl = NULL;
+ }
+
+ if (ckch->ocsp_issuer) {
+ X509_free(ckch->ocsp_issuer);
+ ckch->ocsp_issuer = NULL;
+ }
+
/* no error, fill ckch with new context, old context will be free at end: */
SWAP(ckch->key, key);
SWAP(ckch->dh, dh);

+ 30
- 0
net/haproxy/patches/041-CLEANUP-stats-shut-up-a-wrong-null-deref-warning-from-gcc-9-2.patch

@@ -0,0 +1,30 @@
commit 03abacf806d155ca50fae612c0f999071625dd1d
Author: Willy Tarreau <w@1wt.eu>
Date: Thu Jan 23 11:47:13 2020 +0100
CLEANUP: stats: shut up a wrong null-deref warning from gcc 9.2
As reported in bug #447, gcc 9.2 invents impossible code paths and then
complains that we don't check for our pointers to be NULL... This code
path is not critical, better add the test to shut it up than try to
help it being less creative.
This code hasn't changed for a while, so it could help distros to
backport this to older releases.
(cherry picked from commit 027d206b57bec59397eb6fb23f8ff4e3a2edb2e1)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/stats.c b/src/stats.c
index 32236f457..befa75b30 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -2400,7 +2400,7 @@ static void stats_dump_html_head(struct appctx *appctx, struct uri_auth *uri)
"-->\n"
"</style></head>\n",
(appctx->ctx.stats.flags & STAT_SHNODE) ? " on " : "",
- (appctx->ctx.stats.flags & STAT_SHNODE) ? (uri->node ? uri->node : global.node) : ""
+ (appctx->ctx.stats.flags & STAT_SHNODE) ? (uri && uri->node ? uri->node : global.node) : ""
);
}

+ 28
- 0
net/haproxy/patches/042-BUG-MINOR-ssl-increment-issuer-refcount-if-in-chain.patch View File

@@ -0,0 +1,28 @@
commit 0ebb1d424da107ad4010b261f63e16e857465fc0
Author: William Lallemand <wlallemand@haproxy.org>
Date: Thu Jan 23 11:42:52 2020 +0100
BUG/MINOR: ssl: increment issuer refcount if in chain
When using the OCSP response, if the issuer of the response is in
the certificate chain, its address will be stored in ckch->ocsp_issuer.
However, since the ocsp_issuer could be filled by a separate file, this
pointer is freed. The refcount of the X509 needs to be incremented to
avoid a double free if we free the ocsp_issuer AND the chain.
(cherry picked from commit b829dda57b4c8a44eff53682ed56492ad46ce3ad)
[wt: checked with William, needed for 2.1]
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index e320d908f..180637e6b 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -3307,6 +3307,7 @@ static int ssl_sock_load_files_into_ckch(const char *path, struct cert_key_and_c
issuer = sk_X509_value(ckch->chain, i);
if (X509_check_issued(issuer, ckch->cert) == X509_V_OK) {
ckch->ocsp_issuer = issuer;
+ X509_up_ref(ckch->ocsp_issuer);
break;
} else
issuer = NULL;
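For context on the X509_up_ref() added above: an OpenSSL STACK_OF(X509) owns one reference per entry, so keeping a second long-lived pointer to a chain member requires taking an extra reference, otherwise freeing both the chain and the separate pointer double-frees the certificate. A minimal sketch under that assumption (OpenSSL 1.1.0+ for X509_up_ref(); keep_issuer() is a hypothetical helper, not HAProxy code):

    #include <openssl/x509.h>

    /* Keep the first chain member as a separate, long-lived pointer
     * while the chain itself is released. */
    static void keep_issuer(STACK_OF(X509) *chain, X509 **issuer_out)
    {
        X509 *issuer = sk_X509_value(chain, 0);  /* borrowed: still owned by the stack */

        if (issuer) {
            X509_up_ref(issuer);                 /* take a reference of our own */
            *issuer_out = issuer;
        }
        sk_X509_pop_free(chain, X509_free);      /* drops the stack's reference on each cert */
        /* *issuer_out stays valid; its new owner calls X509_free() exactly once later */
    }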

+ 29
- 0
net/haproxy/patches/043-BUG-MINOR-ssl-memory-leak-w-the-ocsp_issuer.patch View File

@@ -0,0 +1,29 @@
commit c91a4d8dda53f3fb0ab98335f201a30f926349bc
Author: William Lallemand <wlallemand@haproxy.org>
Date: Thu Jan 23 11:53:13 2020 +0100
BUG/MINOR: ssl: memory leak w/ the ocsp_issuer
This patch frees the ocsp_issuer in
ssl_sock_free_cert_key_and_chain_contents().
Should be backported in 2.1.
(cherry picked from commit 5c3c96fd361f7ab6ae237af802d04fe31720da1b)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index 180637e6b..af285938e 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -2965,6 +2965,10 @@ static void ssl_sock_free_cert_key_and_chain_contents(struct cert_key_and_chain
free(ckch->ocsp_response);
ckch->ocsp_response = NULL;
}
+
+ if (ckch->ocsp_issuer)
+ X509_free(ocsp_issuer);
+ ckch->ocsp_issuer = NULL;
}
/*

+ 27
- 0
net/haproxy/patches/044-BUG-MINOR-ssl-typo-in-previous-patch.patch View File

@@ -0,0 +1,27 @@
commit 5d5c377717cfd5230150985c55322f1c5bb61a4e
Author: William Lallemand <wlallemand@haproxy.org>
Date: Thu Jan 23 11:59:02 2020 +0100
BUG/MINOR: ssl: typo in previous patch
The previous patch 5c3c96f ("BUG/MINOR: ssl: memory leak w/ the
ocsp_issuer") contains a typo that prevent it to build.
Should be backported in 2.1.
(cherry picked from commit dad239d08be1f2abe7e54d9332f1eb87acebf987)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index af285938e..713c8aedd 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -2967,7 +2967,7 @@ static void ssl_sock_free_cert_key_and_chain_contents(struct cert_key_and_chain
}
if (ckch->ocsp_issuer)
- X509_free(ocsp_issuer);
+ X509_free(ckch->ocsp_issuer);
ckch->ocsp_issuer = NULL;
}

+ 67
- 0
net/haproxy/patches/045-BUG-MINOR-ssl-cli-ocsp_issuer-must-be-set-w-set-ssl-cert.patch View File

@@ -0,0 +1,67 @@
commit f298352f4042ac2b0db5c12484c9d84f234fe3cd
Author: Emmanuel Hocdet <manu@gandi.net>
Date: Wed Jan 22 17:02:53 2020 +0100
BUG/MINOR: ssl/cli: ocsp_issuer must be set w/ "set ssl cert"
ocsp_issuer is primarily set from ckch->chain when PEM is loaded from file,
but not set when PEM is loaded via CLI payload. Set ckch->ocsp_issuer in
ssl_sock_load_pem_into_ckch to fix that.
Should be backported in 2.1.
(cherry picked from commit 078156d06399282ae467a9d1a450a42238870028)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index 713c8aedd..2cc5ae80e 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -3113,6 +3113,7 @@ static int ssl_sock_load_pem_into_ckch(const char *path, char *buf, struct cert_
{
BIO *in = NULL;
int ret = 1;
+ int i;
X509 *ca;
X509 *cert = NULL;
EVP_PKEY *key = NULL;
@@ -3226,6 +3227,15 @@ static int ssl_sock_load_pem_into_ckch(const char *path, char *buf, struct cert_
SWAP(ckch->cert, cert);
SWAP(ckch->chain, chain);
+ /* check if one of the certificate of the chain is the issuer */
+ for (i = 0; i < sk_X509_num(ckch->chain); i++) {
+ X509 *issuer = sk_X509_value(ckch->chain, i);
+ if (X509_check_issued(issuer, ckch->cert) == X509_V_OK) {
+ ckch->ocsp_issuer = issuer;
+ X509_up_ref(issuer);
+ break;
+ }
+ }
ret = 0;
end:
@@ -3303,22 +3313,8 @@ static int ssl_sock_load_files_into_ckch(const char *path, struct cert_key_and_c
#ifndef OPENSSL_IS_BORINGSSL /* Useless for BoringSSL */
if (ckch->ocsp_response) {
- X509 *issuer;
- int i;
-
- /* check if one of the certificate of the chain is the issuer */
- for (i = 0; i < sk_X509_num(ckch->chain); i++) {
- issuer = sk_X509_value(ckch->chain, i);
- if (X509_check_issued(issuer, ckch->cert) == X509_V_OK) {
- ckch->ocsp_issuer = issuer;
- X509_up_ref(ckch->ocsp_issuer);
- break;
- } else
- issuer = NULL;
- }
-
/* if no issuer was found, try to load an issuer from the .issuer */
- if (!issuer) {
+ if (!ckch->ocsp_issuer) {
struct stat st;
char fp[MAXPATHLEN+1];

+ 46
- 0
net/haproxy/patches/046-BUG-MEDIUM-0rtt-Only-consider-the-SSL-handshake.patch View File

@@ -0,0 +1,46 @@
commit 00ae17b75d20b30ab445970afb6a15f5d11cf257
Author: Olivier Houchard <ohouchard@haproxy.com>
Date: Thu Jan 23 14:57:36 2020 +0100
BUG/MEDIUM: 0rtt: Only consider the SSL handshake.
We only add the Early-data header, or get ssl_fc_has_early to return 1, if
we haven't already done the SSL handshake, as otherwise, we know the early
data were fine, and there's no risk of replay attack. But to do so, we
wrongly checked CO_FL_HANDSHAKE, we have to check CO_FL_SSL_WAIT_HS instead,
as we don't care about the status of any other handshake.
This should be backported to 2.1, 2.0, and 1.9.
When deciding whether we should add the Early-Data header, or whether the sample
fetch should return 1, only the SSL handshake status is relevant.
(cherry picked from commit 220a26c31647b8cfd76f3922d08cb2e847e3009e)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/http_ana.c b/src/http_ana.c
index cb5a60ca9..fc4ca4f49 100644
--- a/src/http_ana.c
+++ b/src/http_ana.c
@@ -519,7 +519,7 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s
}
if (conn && (conn->flags & CO_FL_EARLY_DATA) &&
- (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_HANDSHAKE))) {
+ (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_SSL_WAIT_HS))) {
struct http_hdr_ctx ctx;
ctx.blk = NULL;
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index 2cc5ae80e..c6888c128 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -7200,7 +7200,7 @@ smp_fetch_ssl_fc_has_early(const struct arg *args, struct sample *smp, const cha
}
#else
smp->data.u.sint = ((conn->flags & CO_FL_EARLY_DATA) &&
- (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_HANDSHAKE))) ? 1 : 0;
+ (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_SSL_WAIT_HS))) ? 1 : 0;
#endif
return 1;
}

+ 45
- 0
net/haproxy/patches/047-BUG-MINOR-stktable-report-the-current-proxy-name-in-error-messages.patch View File

@@ -0,0 +1,45 @@
commit e66ed8abc963ec689e2ba672e1be90249ab2612c
Author: Willy Tarreau <w@1wt.eu>
Date: Fri Jan 24 07:19:34 2020 +0100
BUG/MINOR: stktable: report the current proxy name in error messages
Since commit 1b8e68e89a ("MEDIUM: stick-table: Stop handling stick-tables
as proxies."), a rule referencing the current proxy with no table leads
to the following error :
[ALERT] 023/071924 (16479) : Proxy 'px': unable to find stick-table '(null)'.
[ALERT] 023/071914 (16479) : Fatal errors found in configuration.
for a config like this one:
backend px
stick on src
This patch fixes it and should be backported as far as 2.0.
(cherry picked from commit 508d232a06cf082ff2cc694d3f1c03b10a07e719)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/cfgparse.c b/src/cfgparse.c
index 2e200e885..7f884df7c 100644
--- a/src/cfgparse.c
+++ b/src/cfgparse.c
@@ -2722,7 +2722,7 @@ int check_config_validity()
if (!target) {
ha_alert("Proxy '%s': unable to find stick-table '%s'.\n",
- curproxy->id, mrule->table.name);
+ curproxy->id, mrule->table.name ? mrule->table.name : curproxy->id);
cfgerr++;
}
else if (!stktable_compatible_sample(mrule->expr, target->type)) {
@@ -2760,7 +2760,7 @@ int check_config_validity()
if (!target) {
ha_alert("Proxy '%s': unable to find store table '%s'.\n",
- curproxy->id, mrule->table.name);
+ curproxy->id, mrule->table.name ? mrule->table.name : curproxy->id);
cfgerr++;
}
else if (!stktable_compatible_sample(mrule->expr, target->type)) {

+ 69
- 0
net/haproxy/patches/048-BUG-MEDIUM-mux-h2-make-sure-we-dont-emit-TE-headers-with-anything-but-trailers.patch View File

@@ -0,0 +1,69 @@
commit e22b3fb31968569194b1f848fadb4ca01f4dfc73
Author: Willy Tarreau <w@1wt.eu>
Date: Fri Jan 24 09:07:53 2020 +0100
BUG/MEDIUM: mux-h2: make sure we don't emit TE headers with anything but "trailers"
While the H2 parser properly checks for the absence of anything but
"trailers" in the TE header field, we forget to check this when sending
the request to an H2 server. The problem is that an H2->H2 conversion
may keep "gzip" and fail on the next stage.
This patch makes sure that we only send "TE: trailers" if the TE header
contains the "trailers" token, otherwise it's dropped.
This fixes issue #464 and should be backported till 1.9.
(cherry picked from commit bb2c4ae06566b8a8789caca4c48524aeb88cbc1b)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/mux_h2.c b/src/mux_h2.c
index 8a82f60fd..15a5cd757 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -5034,23 +5034,36 @@ static size_t h2s_bck_make_req_headers(struct h2s *h2s, struct htx *htx)
* do not provide an authority.
*/
for (hdr = 0; hdr < sizeof(list)/sizeof(list[0]); hdr++) {
+ struct ist n = list[hdr].n;
+ struct ist v = list[hdr].v;
+
/* these ones do not exist in H2 and must be dropped. */
- if (isteq(list[hdr].n, ist("connection")) ||
- (auth.len && isteq(list[hdr].n, ist("host"))) ||
- isteq(list[hdr].n, ist("proxy-connection")) ||
- isteq(list[hdr].n, ist("keep-alive")) ||
- isteq(list[hdr].n, ist("upgrade")) ||
- isteq(list[hdr].n, ist("transfer-encoding")))
+ if (isteq(n, ist("connection")) ||
+ (auth.len && isteq(n, ist("host"))) ||
+ isteq(n, ist("proxy-connection")) ||
+ isteq(n, ist("keep-alive")) ||
+ isteq(n, ist("upgrade")) ||
+ isteq(n, ist("transfer-encoding")))
continue;
+ if (isteq(n, ist("te"))) {
+ /* "te" may only be sent with "trailers" if this value
+ * is present, otherwise it must be deleted.
+ */
+ v = istist(v, ist("trailers"));
+ if (!v.ptr || (v.len > 8 && v.ptr[8] != ','))
+ continue;
+ v = ist("trailers");
+ }
+
/* Skip all pseudo-headers */
- if (*(list[hdr].n.ptr) == ':')
+ if (*(n.ptr) == ':')
continue;
- if (isteq(list[hdr].n, ist("")))
+ if (isteq(n, ist("")))
break; // end
- if (!hpack_encode_header(&outbuf, list[hdr].n, list[hdr].v)) {
+ if (!hpack_encode_header(&outbuf, n, v)) {
/* output full */
if (b_space_wraps(mbuf))
goto realign_again;
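The keep/drop rule above can be restated outside HAProxy's ist API: the TE header is forwarded only if "trailers" is found and is either the end of the value or immediately followed by a comma. A rough equivalent on a NUL-terminated string (a strstr-based sketch, not the actual istist() logic, which also handles non-terminated buffers):

    #include <string.h>

    /* Returns 1 if "trailers" appears as a complete token in <value>, 0 otherwise. */
    static int te_has_trailers(const char *value)
    {
        const char *p = strstr(value, "trailers");

        if (!p)
            return 0;
        /* the match (8 chars) must end the value or be followed by a comma */
        return p[8] == '\0' || p[8] == ',';
    }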

+ 50
- 0
net/haproxy/patches/049-BUILD-cfgparse-silence-a-bogus-gcc-warning-on-32-bit-machines.patch View File

@@ -0,0 +1,50 @@
commit eb94d47fbc0abc3c0b29a2f0a2bc666db38e2e87
Author: Willy Tarreau <w@1wt.eu>
Date: Fri Jan 24 11:19:13 2020 +0100
BUILD: cfgparse: silence a bogus gcc warning on 32-bit machines
A first patch was made during 2.0-dev to silence a bogus warning emitted
by gcc : dd1c8f1f72 ("MINOR: cfgparse: Add a cast to make gcc happier."),
but it turns out it was not sufficient as the warning re-appeared on 32-bit
machines under gcc-8 and gcc-9 :
src/cfgparse.c: In function 'check_config_validity':
src/cfgparse.c:3642:33: warning: argument 1 range [2147483648, 4294967295] exceeds maximum object size 2147483647 [-Walloc-size-larger-than=]
newsrv->idle_orphan_conns = calloc((unsigned int)global.nbthread, sizeof(*newsrv->idle_orphan_conns));
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This warning doesn't trigger in other locations, and it immediately
vanishes if the previous or subsequent loops do not depend on
global.nbthread anymore, or if the field ordering of the struct server
changes! As discussed in the thread at:
https://www.mail-archive.com/haproxy@formilux.org/msg36107.html
playing with -Walloc-size-larger-than has no effect. And a minimal
reproducer could be isolated, indicating it's pointless to circle around
this one. Let's just cast nbthread to ushort so that gcc cannot make
this wrong detection. It's unlikely we'll use more than 65535 threads in
the near future anyway.
This may be backported to older releases if they are also affected, at
least to ease the job of distro maintainers.
Thanks to Ilya for testing.
(cherry picked from commit 645c588e7138526ccb71f3c47f00045cdf1d8510)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/cfgparse.c b/src/cfgparse.c
index 7f884df7c..2a22405a3 100644
--- a/src/cfgparse.c
+++ b/src/cfgparse.c
@@ -3656,7 +3656,7 @@ out_uri_auth_compat:
MT_LIST_INIT(&toremove_connections[i]);
}
}
- newsrv->idle_orphan_conns = calloc((unsigned int)global.nbthread, sizeof(*newsrv->idle_orphan_conns));
+ newsrv->idle_orphan_conns = calloc((unsigned short)global.nbthread, sizeof(*newsrv->idle_orphan_conns));
if (!newsrv->idle_orphan_conns)
goto err;
for (i = 0; i < global.nbthread; i++)

+ 38
- 0
net/haproxy/patches/050-MINOR-lua-Add-hlua_prepend_path-function.patch View File

@@ -0,0 +1,38 @@
commit ed5d6a9f3c2a1cf9e0408c438c76c0643df9d6a5
Author: Tim Duesterhus <tim@bastelstu.be>
Date: Sun Jan 12 13:55:39 2020 +0100
MINOR: lua: Add hlua_prepend_path function
This function is added in preparation for following patches.
(cherry picked from commit c9fc9f2836f1e56eef3eaf690421eeff34dd8a2b)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/hlua.c b/src/hlua.c
index 37f786687..10d615211 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -7458,6 +7458,22 @@ static int hlua_load(char **args, int section_type, struct proxy *curpx,
return 0;
}
+/* Prepend the given <path> followed by a semicolon to the `package.<type>` variable
+ * in the given <ctx>.
+ */
+static int hlua_prepend_path(struct hlua ctx, char *type, char *path)
+{
+ lua_getglobal(ctx.T, "package"); /* push package variable */
+ lua_pushstring(ctx.T, path); /* push given path */
+ lua_pushstring(ctx.T, ";"); /* push semicolon */
+ lua_getfield(ctx.T, -3, type); /* push old path */
+ lua_concat(ctx.T, 3); /* concatenate to new path */
+ lua_setfield(ctx.T, -2, type); /* store new path */
+ lua_pop(ctx.T, 1); /* pop package variable */
+
+ return 0;
+}
+
/* configuration keywords declaration */
static struct cfg_kw_list cfg_kws = {{ },{
{ CFG_GLOBAL, "lua-load", hlua_load },

+ 98
- 0
net/haproxy/patches/051-MINOR-lua-Add-lua-prepend-path-configuration-option.patch View File

@ -0,0 +1,98 @@
commit c5438ed610bde49957d8d406f6e98a481e68bef3
Author: Tim Duesterhus <tim@bastelstu.be>
Date: Sun Jan 12 13:55:40 2020 +0100
MINOR: lua: Add lua-prepend-path configuration option
lua-prepend-path allows the administrator to specify a custom Lua library
path to load custom Lua modules that are useful within the context of HAProxy
without polluting the global Lua library folder.
(cherry picked from commit dd74b5f2372f610cfa60e8cb2e151e2de377357e)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/doc/configuration.txt b/doc/configuration.txt
index 36291a339..54d155b36 100644
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -598,6 +598,7 @@ The following keywords are supported in the "global" section :
- log-tag
- log-send-hostname
- lua-load
+ - lua-prepend-path
- mworker-max-reloads
- nbproc
- nbthread
@@ -1037,6 +1038,31 @@ lua-load <file>
This global directive loads and executes a Lua file. This directive can be
used multiple times.
+lua-prepend-path <string> [<type>]
+ Prepends the given string followed by a semicolon to Lua's package.<type>
+ variable.
+ <type> must either be "path" or "cpath". If <type> is not given it defaults
+ to "path".
+
+ Lua's paths are semicolon delimited lists of patterns that specify how the
+ `require` function attempts to find the source file of a library. Question
+ marks (?) within a pattern will be replaced by module name. The path is
+ evaluated left to right. This implies that paths that are prepended later
+ will be checked earlier.
+
+ As an example by specifying the following path:
+
+ lua-prepend-path /usr/share/haproxy-lua/?/init.lua
+ lua-prepend-path /usr/share/haproxy-lua/?.lua
+
+ When `require "example"` is being called Lua will first attempt to load the
+ /usr/share/haproxy-lua/example.lua script, if that does not exist the
+ /usr/share/haproxy-lua/example/init.lua will be attempted and the default
+ paths if that does not exist either.
+
+ See https://www.lua.org/pil/8.1.html for the details within the Lua
+ documentation.
+
master-worker [no-exit-on-failure]
Master-worker mode. It is equivalent to the command line "-W" argument.
This mode will launch a "master" which will monitor the "workers". Using
diff --git a/src/hlua.c b/src/hlua.c
index 10d615211..a245f9b7d 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -7474,8 +7474,36 @@ static int hlua_prepend_path(struct hlua ctx, char *type, char *path)
return 0;
}
+static int hlua_config_prepend_path(char **args, int section_type, struct proxy *curpx,
+ struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ char *path;
+ char *type = "path";
+ if (too_many_args(2, args, err, NULL)) {
+ return -1;
+ }
+
+ if (!(*args[1])) {
+ memprintf(err, "'%s' expects to receive a <path> as argument", args[0]);
+ return -1;
+ }
+ path = args[1];
+
+ if (*args[2]) {
+ if (strcmp(args[2], "path") != 0 && strcmp(args[2], "cpath") != 0) {
+ memprintf(err, "'%s' expects <type> to either be 'path' or 'cpath'", args[0]);
+ return -1;
+ }
+ type = args[2];
+ }
+
+ return hlua_prepend_path(gL, type, path);
+}
+
/* configuration keywords declaration */
static struct cfg_kw_list cfg_kws = {{ },{
+ { CFG_GLOBAL, "lua-prepend-path", hlua_config_prepend_path },
{ CFG_GLOBAL, "lua-load", hlua_load },
{ CFG_GLOBAL, "tune.lua.session-timeout", hlua_session_timeout },
{ CFG_GLOBAL, "tune.lua.task-timeout", hlua_task_timeout },

+ 62
- 0
net/haproxy/patches/052-MINOR-lua-Add-HLUA_PREPEND_C-PATH-build-option.patch View File

@@ -0,0 +1,62 @@
commit 4fa1de1be89e1d64771a8e8cc725f991ece21819
Author: Tim Duesterhus <tim@bastelstu.be>
Date: Sun Jan 12 13:55:41 2020 +0100
MINOR: lua: Add HLUA_PREPEND_C?PATH build option
This complements the lua-prepend-path configuration option to allow
distro maintainers to add a default path for HAProxy specific Lua
libraries.
(cherry picked from commit 541fe1ec52a0f9e1912dea5b3a784406dbdfad22)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/Makefile b/Makefile
index 8399f6ca3..bb494fac3 100644
--- a/Makefile
+++ b/Makefile
@@ -88,6 +88,8 @@
# installation only.
# DOCDIR is set to "$(PREFIX)/doc/haproxy" by default and is used for
# installation only.
+# HLUA_PREPEND_PATH may be used to prepend a folder to Lua's default package.path.
+# HLUA_PREPEND_CPATH may be used to prepend a folder to Lua's default package.cpath.
#
# Other variables :
# PCRE_CONFIG : force the binary path to get pcre config (by default
@@ -546,6 +548,14 @@ LUA_INC := $(firstword $(foreach lib,lua5.3 lua53 lua,$(call check_lua_inc,$(lib
ifneq ($(LUA_INC),)
OPTIONS_CFLAGS += -I$(LUA_INC)
endif
+ifneq ($(HLUA_PREPEND_PATH),)
+OPTIONS_CFLAGS += -DHLUA_PREPEND_PATH=$(HLUA_PREPEND_PATH)
+BUILD_OPTIONS += HLUA_PREPEND_PATH=$(HLUA_PREPEND_PATH)
+endif
+ifneq ($(HLUA_PREPEND_CPATH),)
+OPTIONS_CFLAGS += -DHLUA_PREPEND_CPATH=$(HLUA_PREPEND_CPATH)
+BUILD_OPTIONS += HLUA_PREPEND_CPATH=$(HLUA_PREPEND_CPATH)
+endif
endif
OPTIONS_LDFLAGS += $(LUA_LD_FLAGS) -l$(LUA_LIB_NAME) -lm
diff --git a/src/hlua.c b/src/hlua.c
index a245f9b7d..8ace405d4 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -7651,6 +7651,16 @@ void hlua_init(void)
/* Initialise lua. */
luaL_openlibs(gL.T);
+#define HLUA_PREPEND_PATH_TOSTRING1(x) #x
+#define HLUA_PREPEND_PATH_TOSTRING(x) HLUA_PREPEND_PATH_TOSTRING1(x)
+#ifdef HLUA_PREPEND_PATH
+ hlua_prepend_path(gL, "path", HLUA_PREPEND_PATH_TOSTRING(HLUA_PREPEND_PATH));
+#endif
+#ifdef HLUA_PREPEND_CPATH
+ hlua_prepend_path(gL, "cpath", HLUA_PREPEND_PATH_TOSTRING(HLUA_PREPEND_CPATH));
+#endif
+#undef HLUA_PREPEND_PATH_TOSTRING
+#undef HLUA_PREPEND_PATH_TOSTRING1
/* Set safe environment for the initialisation. */
if (!SET_SAFE_LJMP(gL.T)) {
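The two-level TOSTRING macros added above exist because a single '#' stringifies the macro name rather than its value; the extra indirection forces HLUA_PREPEND_PATH to be expanded first. A standalone illustration of the difference (MYPATH is an arbitrary example value, not an HAProxy macro):

    #include <stdio.h>

    #define STR1(x) #x
    #define STR(x)  STR1(x)

    #define MYPATH /usr/share/haproxy-lua/?.lua  /* example value, as if passed via -D */

    int main(void)
    {
        printf("%s\n", STR1(MYPATH)); /* single level: prints "MYPATH" */
        printf("%s\n", STR(MYPATH));  /* two levels: prints "/usr/share/haproxy-lua/?.lua" */
        return 0;
    }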

+ 36
- 0
net/haproxy/patches/053-BUG-MEDIUM-ssl-Dont-forget-to-free-ctx--ssl-on-failure.patch View File

@@ -0,0 +1,36 @@
commit a95b302da71065e443477c2cbcd852ebb52d6db3
Author: Olivier Houchard <cognet@ci0.org>
Date: Fri Jan 24 15:17:38 2020 +0100
BUG/MEDIUM: ssl: Don't forget to free ctx->ssl on failure.
In ssl_sock_init(), if we fail to allocate the BIO, don't forget to free
the SSL *, or we'd end up with a memory leak.
This should be backported to 2.1 and 2.0.
(cherry picked from commit efe5e8e99890b24dcfb8c925d98bf82e2fdf0b9f)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
index c6888c128..6841813b5 100644
--- a/src/ssl_sock.c
+++ b/src/ssl_sock.c
@@ -5790,6 +5790,8 @@ static int ssl_sock_init(struct connection *conn, void **xprt_ctx)
}
ctx->bio = BIO_new(ha_meth);
if (!ctx->bio) {
+ SSL_free(ctx->ssl);
+ ctx->ssl = NULL;
if (may_retry--) {
pool_gc(NULL);
goto retry_connect;
@@ -5866,6 +5868,8 @@ static int ssl_sock_init(struct connection *conn, void **xprt_ctx)
ctx->bio = BIO_new(ha_meth);
if (!ctx->bio) {
+ SSL_free(ctx->ssl);
+ ctx->ssl = NULL;
if (may_retry--) {
pool_gc(NULL);
goto retry_accept;
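The fix above is the usual unwind-on-partial-failure rule: once SSL_new() has succeeded, every later failure in the same init path must release that object before retrying or returning. A minimal sketch of the pattern, with a plain memory BIO standing in for HAProxy's ha_meth (hypothetical helper, plain OpenSSL API only):

    #include <openssl/ssl.h>

    /* Returns the SSL object on success, NULL on failure, without leaking
     * the partially built state. */
    static SSL *init_ssl_with_bio(SSL_CTX *ssl_ctx, BIO **bio_out)
    {
        SSL *ssl = SSL_new(ssl_ctx);

        if (!ssl)
            return NULL;

        *bio_out = BIO_new(BIO_s_mem());
        if (!*bio_out) {
            SSL_free(ssl);  /* without this, the SSL object from SSL_new() leaks */
            return NULL;
        }
        return ssl;
    }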

+ 33
- 0
net/haproxy/patches/054-BUG-MINOR-tcpchecks-fix-the-connect-flags-regarding-delayed-ack.patch View File

@@ -0,0 +1,33 @@
commit 77ec6260f99e7f41f22235bcc1905eb8b15c7eb8
Author: Willy Tarreau <w@1wt.eu>
Date: Fri Jan 24 17:52:37 2020 +0100
BUG/MINOR: tcpchecks: fix the connect() flags regarding delayed ack
In issue #465, we see that Coverity detected dead code in checks.c
which is in fact a missing parenthesis to build the connect() flags
consecutive to the API change in commit fdcb007ad8 ("MEDIUM: proto:
Change the prototype of the connect() method.").
The impact should be imperceptible as in the best case it may have
resulted in a missed optimization trying to save a syscall or to merge
outgoing packets.
It may be backported as far as 2.0 though it's not critical.
(cherry picked from commit 74ab7d2b80cf3930e2b3957c9234953a632c5226)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/checks.c b/src/checks.c
index 2b7fc09c6..952114e95 100644
--- a/src/checks.c
+++ b/src/checks.c
@@ -2962,7 +2962,7 @@ static int tcpcheck_main(struct check *check)
ret = SF_ERR_INTERNAL;
if (proto && proto->connect)
ret = proto->connect(conn,
- CONNECT_HAS_DATA /* I/O polling is always needed */ | (next && next->action == TCPCHK_ACT_EXPECT) ? 0 : CONNECT_DELACK_ALWAYS);
+ CONNECT_HAS_DATA /* I/O polling is always needed */ | ((next && next->action == TCPCHK_ACT_EXPECT) ? 0 : CONNECT_DELACK_ALWAYS));
if (conn_ctrl_ready(conn) &&
check->current_step->conn_opts & TCPCHK_OPT_SEND_PROXY) {
conn->send_proxy_ofs = 1;
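The one-character fix above matters because in C the bitwise OR binds tighter than the conditional operator, so without the added parentheses the whole OR expression becomes the ternary's condition and CONNECT_HAS_DATA is dropped from the result. A tiny self-contained demonstration:

    #include <stdio.h>

    #define FLAG_A 0x1
    #define FLAG_B 0x2

    int main(void)
    {
        int cond  = 0;
        int buggy = FLAG_A | cond ? 0 : FLAG_B;   /* parsed as (FLAG_A | cond) ? 0 : FLAG_B -> 0 */
        int fixed = FLAG_A | (cond ? 0 : FLAG_B); /* FLAG_A | FLAG_B                        -> 3 */

        printf("buggy=%d fixed=%d\n", buggy, fixed); /* prints: buggy=0 fixed=3 */
        return 0;
    }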

net/haproxy/patches/000-OPENWRT-add-uclibc-support.patch → net/haproxy/patches/055-OPENWRT-add-uclibc-support.patch View File

