
Merge pull request #10322 from jefferyto/python-2.7.17

python: Update to 2.7.17, refresh patches
Hannu Nyman, 5 years ago (committed by GitHub)
commit 94c344bb92
18 changed files with 16 additions and 1150 deletions
  1. +5 -5    lang/python/python-version.mk
  2. +2 -2    lang/python/python/Makefile
  3. +1 -1    lang/python/python/patches/002-do-not-add-include-dirs-when-cross-compiling.patch
  4. +2 -2    lang/python/python/patches/003-do-not-compile-tests-at-build.patch
  5. +1 -1    lang/python/python/patches/004-do-not-write-bytes-codes.patch
  6. +1 -1    lang/python/python/patches/006-remove-multi-arch-and-local-paths.patch
  7. +3 -3    lang/python/python/patches/009-do-not-use-dblib_dir-when-cross-compiling.patch
  8. +1 -1    lang/python/python/patches/010-do-not-add-rt-lib-dirs-when-cross-compiling.patch
  9. +0 -20   lang/python/python/patches/011-fix-ssl-build-use-have-npn.patch
 10. +0 -15   lang/python/python/patches/017_lib2to3_fix_pyc_search.patch
 11. +0 -155  lang/python/python/patches/019-bpo-36216-Add-check-for-characters-in-netloc-that-normalize-to-separators-GH-12216.patch
 12. +0 -23   lang/python/python/patches/020-bpo-36216-Only-print-test-messages-when-verbose-GH-12291.patch
 13. +0 -123  lang/python/python/patches/021-2.7-bpo-35121-prefix-dot-in-domain-for-proper-subdom.patch
 14. +0 -2    lang/python/python/patches/021-compileall-add-recursion-option.patch
 15. +0 -365  lang/python/python/patches/022-bpo-30458-Disallow-control-chars-in-http-URLs-GH-13315.patch
 16. +0 -192  lang/python/python/patches/023-bpo-35907-Avoid-file-reading-as-disallowing-the-unnecessary-URL-scheme-in-urllib-GH-11842.patch
 17. +0 -159  lang/python/python/patches/027-bpo-38243-Escape-the-server-title-of-DocXMLRPCServer.patch
 18. +0 -80   lang/python/python/patches/028-bpo-34155-Dont-parse-domains-containing-GH-13079.patch

+5 -5  lang/python/python-version.mk

@@ -6,10 +6,10 @@
 #
 PYTHON_VERSION:=2.7
-PYTHON_VERSION_MICRO:=16
+PYTHON_VERSION_MICRO:=17
-PYTHON_SETUPTOOLS_PKG_RELEASE:=3
-PYTHON_PIP_PKG_RELEASE:=2
+PYTHON_SETUPTOOLS_PKG_RELEASE:=1
+PYTHON_PIP_PKG_RELEASE:=1
-PYTHON_SETUPTOOLS_VERSION:=40.6.2
-PYTHON_PIP_VERSION:=18.1
+PYTHON_SETUPTOOLS_VERSION:=41.2.0
+PYTHON_PIP_VERSION:=19.2.3

+2 -2  lang/python/python/Makefile

@@ -12,11 +12,11 @@ include ../python-version.mk
 PKG_NAME:=python
 PKG_VERSION:=$(PYTHON_VERSION).$(PYTHON_VERSION_MICRO)
-PKG_RELEASE:=11
+PKG_RELEASE:=1
 PKG_SOURCE:=Python-$(PKG_VERSION).tar.xz
 PKG_SOURCE_URL:=https://www.python.org/ftp/python/$(PKG_VERSION)
-PKG_HASH:=f222ef602647eecb6853681156d32de4450a2c39f4de93bd5b20235f2e660ed7
+PKG_HASH:=4d43f033cdbd0aa7b7023c81b0e986fd11e653b5248dac9144d508f11812ba41
 PKG_LICENSE:=Python/2.0
 PKG_LICENSE_FILES:=LICENSE Doc/copyright.rst Doc/license.rst Modules/_ctypes/darwin/LICENSE Modules/_ctypes/libffi/LICENSE Modules/_ctypes/libffi_osx/LICENSE Modules/expat/COPYING
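
For anyone refreshing this bump locally, a quick way to confirm a downloaded tarball matches the new PKG_HASH above is sketched below (illustrative Python snippet, not part of the PR; assumes Python-2.7.17.tar.xz sits in the current directory):

    # The expected digest is the PKG_HASH value introduced by this commit.
    import hashlib

    expected = "4d43f033cdbd0aa7b7023c81b0e986fd11e653b5248dac9144d508f11812ba41"
    with open("Python-2.7.17.tar.xz", "rb") as f:
        print hashlib.sha256(f.read()).hexdigest() == expected  # True for a good download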


+1 -1  lang/python/python/patches/002-do-not-add-include-dirs-when-cross-compiling.patch

@@ -1,6 +1,6 @@
 --- a/setup.py
 +++ b/setup.py
-@@ -497,7 +497,8 @@ class PyBuildExt(build_ext):
+@@ -543,7 +543,8 @@ class PyBuildExt(build_ext):
 add_dir_to_list(dir_list, directory)
 if os.path.normpath(sys.prefix) != '/usr' \


+2 -2  lang/python/python/patches/003-do-not-compile-tests-at-build.patch

@@ -1,6 +1,6 @@
 --- a/Makefile.pre.in
 +++ b/Makefile.pre.in
-@@ -1127,6 +1127,7 @@ libinstall: build_all $(srcdir)/Lib/$(PL
+@@ -1133,6 +1133,7 @@ libinstall: build_all $(srcdir)/Lib/$(PL
 done; \
 done
 $(INSTALL_DATA) $(srcdir)/LICENSE $(DESTDIR)$(LIBDEST)/LICENSE.txt
@@ -8,7 +8,7 @@
 if test -d $(DESTDIR)$(LIBDEST)/distutils/tests; then \
 $(INSTALL_DATA) $(srcdir)/Modules/xxmodule.c \
 $(DESTDIR)$(LIBDEST)/distutils/tests ; \
-@@ -1153,6 +1154,7 @@ libinstall: build_all $(srcdir)/Lib/$(PL
+@@ -1159,6 +1160,7 @@ libinstall: build_all $(srcdir)/Lib/$(PL
 $(PYTHON_FOR_BUILD) -m lib2to3.pgen2.driver $(DESTDIR)$(LIBDEST)/lib2to3/Grammar.txt
 -PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \
 $(PYTHON_FOR_BUILD) -m lib2to3.pgen2.driver $(DESTDIR)$(LIBDEST)/lib2to3/PatternGrammar.txt


+1 -1  lang/python/python/patches/004-do-not-write-bytes-codes.patch

@@ -3,7 +3,7 @@
 @@ -71,7 +71,7 @@ int Py_InteractiveFlag; /* Needed by Py_
 int Py_InspectFlag; /* Needed to determine whether to exit at SystemExit */
 int Py_NoSiteFlag; /* Suppress 'import site' */
-int Py_BytesWarningFlag; /* Warn on str(bytes) and str(buffer) */
+int Py_BytesWarningFlag; /* Warn on comparison between bytearray and unicode */
 -int Py_DontWriteBytecodeFlag; /* Suppress writing bytecode files (*.py[co]) */
 +int Py_DontWriteBytecodeFlag = 1; /* Suppress writing bytecode files (*.py[co]) */
 int Py_UseClassExceptionsFlag = 1; /* Needed by bltinmodule.c: deprecated */


+1 -1  lang/python/python/patches/006-remove-multi-arch-and-local-paths.patch

@@ -1,6 +1,6 @@
 --- a/setup.py
 +++ b/setup.py
-@@ -454,13 +454,8 @@ class PyBuildExt(build_ext):
+@@ -500,13 +500,8 @@ class PyBuildExt(build_ext):
 os.unlink(tmpfile)
 def detect_modules(self):


+3 -3  lang/python/python/patches/009-do-not-use-dblib_dir-when-cross-compiling.patch

@@ -1,6 +1,6 @@
 --- a/setup.py
 +++ b/setup.py
-@@ -1083,6 +1083,7 @@ class PyBuildExt(build_ext):
+@@ -1129,6 +1129,7 @@ class PyBuildExt(build_ext):
 if db_setup_debug: print "db lib: ", dblib, "not found"
 except db_found:
@@ -8,7 +8,7 @@
 if db_setup_debug:
 print "bsddb using BerkeleyDB lib:", db_ver, dblib
 print "bsddb lib dir:", dblib_dir, " inc dir:", db_incdir
-@@ -1097,7 +1098,7 @@ class PyBuildExt(build_ext):
+@@ -1143,7 +1144,7 @@ class PyBuildExt(build_ext):
 exts.append(Extension('_bsddb', ['_bsddb.c'],
 depends = ['bsddb.h'],
 library_dirs=dblib_dir,
@@ -17,7 +17,7 @@
 include_dirs=db_incs,
 libraries=dblibs))
 else:
-@@ -1308,10 +1309,11 @@ class PyBuildExt(build_ext):
+@@ -1354,10 +1355,11 @@ class PyBuildExt(build_ext):
 break
 elif cand == "bdb":
 if db_incs is not None:


+1 -1  lang/python/python/patches/010-do-not-add-rt-lib-dirs-when-cross-compiling.patch

@@ -1,6 +1,6 @@
 --- a/setup.py
 +++ b/setup.py
-@@ -463,8 +463,9 @@ class PyBuildExt(build_ext):
+@@ -509,8 +509,9 @@ class PyBuildExt(build_ext):
 # directly since an inconsistently reproducible issue comes up where
 # the environment variable is not set even though the value were passed
 # into configure and stored in the Makefile (issue found on OS X 10.3).


+0 -20  lang/python/python/patches/011-fix-ssl-build-use-have-npn.patch

@@ -1,20 +0,0 @@
--- a/Modules/_ssl.c
+++ b/Modules/_ssl.c
@@ -1590,7 +1590,7 @@ static PyObject *PySSL_version(PySSLSocket *self)
return PyUnicode_FromString(version);
}
-#if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
+#if HAVE_NPN
static PyObject *PySSL_selected_npn_protocol(PySSLSocket *self) {
const unsigned char *out;
unsigned int outlen;
@@ -2118,7 +2118,7 @@ static PyMethodDef PySSLMethods[] = {
PySSL_peercert_doc},
{"cipher", (PyCFunction)PySSL_cipher, METH_NOARGS},
{"version", (PyCFunction)PySSL_version, METH_NOARGS},
-#ifdef OPENSSL_NPN_NEGOTIATED
+#if HAVE_NPN
{"selected_npn_protocol", (PyCFunction)PySSL_selected_npn_protocol, METH_NOARGS},
#endif
#if HAVE_ALPN

+0 -15  lang/python/python/patches/017_lib2to3_fix_pyc_search.patch

@@ -1,15 +0,0 @@
--- a/Lib/lib2to3/refactor.py
+++ b/Lib/lib2to3/refactor.py
@@ -40,6 +40,12 @@ def get_all_fix_names(fixer_pkg, remove_prefix=True):
if remove_prefix:
name = name[4:]
fix_names.append(name[:-3])
+ if name.startswith("fix_") and name.endswith(".pyc"):
+ if remove_prefix:
+ name = name[4:]
+ name = name[:-4]
+ if name not in fix_names:
+ fix_names.append(name)
return fix_names

+0 -155  lang/python/python/patches/019-bpo-36216-Add-check-for-characters-in-netloc-that-normalize-to-separators-GH-12216.patch

@@ -1,155 +0,0 @@
From 3e3669c9c41a27e1466e2c28b3906e3dd0ce3e7e Mon Sep 17 00:00:00 2001
From: Steve Dower <steve.dower@python.org>
Date: Thu, 7 Mar 2019 08:25:22 -0800
Subject: [PATCH] bpo-36216: Add check for characters in netloc that normalize
to separators (GH-12201)
---
Doc/library/urlparse.rst | 20 ++++++++++++++++
Lib/test/test_urlparse.py | 24 +++++++++++++++++++
Lib/urlparse.py | 17 +++++++++++++
.../2019-03-06-09-38-40.bpo-36216.6q1m4a.rst | 3 +++
4 files changed, 64 insertions(+)
create mode 100644 Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst
diff --git a/Doc/library/urlparse.rst b/Doc/library/urlparse.rst
index 22249da54fbb..0989c88c3022 100644
--- a/Doc/library/urlparse.rst
+++ b/Doc/library/urlparse.rst
@@ -119,12 +119,22 @@ The :mod:`urlparse` module defines the following functions:
See section :ref:`urlparse-result-object` for more information on the result
object.
+ Characters in the :attr:`netloc` attribute that decompose under NFKC
+ normalization (as used by the IDNA encoding) into any of ``/``, ``?``,
+ ``#``, ``@``, or ``:`` will raise a :exc:`ValueError`. If the URL is
+ decomposed before parsing, or is not a Unicode string, no error will be
+ raised.
+
.. versionchanged:: 2.5
Added attributes to return value.
.. versionchanged:: 2.7
Added IPv6 URL parsing capabilities.
+ .. versionchanged:: 2.7.17
+ Characters that affect netloc parsing under NFKC normalization will
+ now raise :exc:`ValueError`.
+
.. function:: parse_qs(qs[, keep_blank_values[, strict_parsing[, max_num_fields]]])
@@ -232,11 +242,21 @@ The :mod:`urlparse` module defines the following functions:
See section :ref:`urlparse-result-object` for more information on the result
object.
+ Characters in the :attr:`netloc` attribute that decompose under NFKC
+ normalization (as used by the IDNA encoding) into any of ``/``, ``?``,
+ ``#``, ``@``, or ``:`` will raise a :exc:`ValueError`. If the URL is
+ decomposed before parsing, or is not a Unicode string, no error will be
+ raised.
+
.. versionadded:: 2.2
.. versionchanged:: 2.5
Added attributes to return value.
+ .. versionchanged:: 2.7.17
+ Characters that affect netloc parsing under NFKC normalization will
+ now raise :exc:`ValueError`.
+
.. function:: urlunsplit(parts)
diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index 4e1ded73c266..73b0228ea8e3 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -1,4 +1,6 @@
from test import test_support
+import sys
+import unicodedata
import unittest
import urlparse
@@ -624,6 +626,28 @@ def test_portseparator(self):
self.assertEqual(urlparse.urlparse("http://www.python.org:80"),
('http','www.python.org:80','','','',''))
+ def test_urlsplit_normalization(self):
+ # Certain characters should never occur in the netloc,
+ # including under normalization.
+ # Ensure that ALL of them are detected and cause an error
+ illegal_chars = u'/:#?@'
+ hex_chars = {'{:04X}'.format(ord(c)) for c in illegal_chars}
+ denorm_chars = [
+ c for c in map(unichr, range(128, sys.maxunicode))
+ if (hex_chars & set(unicodedata.decomposition(c).split()))
+ and c not in illegal_chars
+ ]
+ # Sanity check that we found at least one such character
+ self.assertIn(u'\u2100', denorm_chars)
+ self.assertIn(u'\uFF03', denorm_chars)
+
+ for scheme in [u"http", u"https", u"ftp"]:
+ for c in denorm_chars:
+ url = u"{}://netloc{}false.netloc/path".format(scheme, c)
+ print "Checking %r" % url
+ with self.assertRaises(ValueError):
+ urlparse.urlsplit(url)
+
def test_main():
test_support.run_unittest(UrlParseTestCase)
diff --git a/Lib/urlparse.py b/Lib/urlparse.py
index f7c2b032b097..54eda08651ab 100644
--- a/Lib/urlparse.py
+++ b/Lib/urlparse.py
@@ -165,6 +165,21 @@ def _splitnetloc(url, start=0):
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
+def _checknetloc(netloc):
+ if not netloc or not isinstance(netloc, unicode):
+ return
+ # looking for characters like \u2100 that expand to 'a/c'
+ # IDNA uses NFKC equivalence, so normalize for this check
+ import unicodedata
+ netloc2 = unicodedata.normalize('NFKC', netloc)
+ if netloc == netloc2:
+ return
+ _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
+ for c in '/?#@:':
+ if c in netloc2:
+ raise ValueError("netloc '" + netloc2 + "' contains invalid " +
+ "characters under NFKC normalization")
+
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
@@ -193,6 +208,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
+ _checknetloc(netloc)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
@@ -216,6 +232,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
+ _checknetloc(netloc)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
diff --git a/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst b/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst
new file mode 100644
index 000000000000..1e1ad92c6feb
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2019-03-06-09-38-40.bpo-36216.6q1m4a.rst
@@ -0,0 +1,3 @@
+Changes urlsplit() to raise ValueError when the URL contains characters that
+decompose under IDNA encoding (NFKC-normalization) into characters that
+affect how the URL is parsed.
\ No newline at end of file

+0 -23  lang/python/python/patches/020-bpo-36216-Only-print-test-messages-when-verbose-GH-12291.patch

@@ -1,23 +0,0 @@
From 06b5ee585d6e76bdbb4002f642d864d860cbbd2b Mon Sep 17 00:00:00 2001
From: Steve Dower <steve.dower@python.org>
Date: Tue, 12 Mar 2019 08:23:33 -0700
Subject: [PATCH] bpo-36216: Only print test messages when verbose
---
Lib/test/test_urlparse.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index 73b0228ea8e3..1830d0b28688 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -644,7 +644,8 @@ def test_urlsplit_normalization(self):
for scheme in [u"http", u"https", u"ftp"]:
for c in denorm_chars:
url = u"{}://netloc{}false.netloc/path".format(scheme, c)
- print "Checking %r" % url
+ if test_support.verbose:
+ print "Checking %r" % url
with self.assertRaises(ValueError):
urlparse.urlsplit(url)
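
The two bpo-36216 patches above are dropped because 2.7.17 already ships the fix upstream. A rough illustration of the behaviour their tests describe, run against a 2.7.17 interpreter (illustrative only, not part of the diff):

    # u'\u2100' NFKC-normalizes to 'a/c', smuggling a '/' into the netloc,
    # so urlsplit() is expected to reject it with ValueError.
    import urlparse

    print urlparse.urlsplit(u"http://netloc/path")  # plain URLs still parse
    try:
        urlparse.urlsplit(u"http://netloc\u2100false.netloc/path")
    except ValueError as exc:
        print "rejected:", exc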

+0 -123  lang/python/python/patches/021-2.7-bpo-35121-prefix-dot-in-domain-for-proper-subdom.patch

@@ -1,123 +0,0 @@
From 979daae300916adb399ab5b51410b6ebd0888f13 Mon Sep 17 00:00:00 2001
From: Xtreak <tir.karthi@gmail.com>
Date: Sat, 15 Jun 2019 20:59:43 +0530
Subject: [PATCH] [2.7] bpo-35121: prefix dot in domain for proper subdomain
validation (GH-10258) (GH-13426)
This is a manual backport of ca7fe5063593958e5efdf90f068582837f07bd14 since 2.7 has `http.cookiejar` in `cookielib`
https://bugs.python.org/issue35121
---
Lib/cookielib.py | 13 ++++++--
Lib/test/test_cookielib.py | 30 +++++++++++++++++++
.../2019-05-20-00-35-12.bpo-35121.RRi-HU.rst | 4 +++
3 files changed, 45 insertions(+), 2 deletions(-)
create mode 100644 Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst
diff --git a/Lib/cookielib.py b/Lib/cookielib.py
index 2dd7c48728..0b471a42f2 100644
--- a/Lib/cookielib.py
+++ b/Lib/cookielib.py
@@ -1139,6 +1139,11 @@ class DefaultCookiePolicy(CookiePolicy):
req_host, erhn = eff_request_host(request)
domain = cookie.domain
+ if domain and not domain.startswith("."):
+ dotdomain = "." + domain
+ else:
+ dotdomain = domain
+
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
@@ -1151,7 +1156,7 @@ class DefaultCookiePolicy(CookiePolicy):
_debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
- if cookie.version == 0 and not ("."+erhn).endswith(domain):
+ if cookie.version == 0 and not ("."+erhn).endswith(dotdomain):
_debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
@@ -1165,7 +1170,11 @@ class DefaultCookiePolicy(CookiePolicy):
req_host = "."+req_host
if not erhn.startswith("."):
erhn = "."+erhn
- if not (req_host.endswith(domain) or erhn.endswith(domain)):
+ if domain and not domain.startswith("."):
+ dotdomain = "." + domain
+ else:
+ dotdomain = domain
+ if not (req_host.endswith(dotdomain) or erhn.endswith(dotdomain)):
#_debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
diff --git a/Lib/test/test_cookielib.py b/Lib/test/test_cookielib.py
index f2dd9727d1..7f7ff614d6 100644
--- a/Lib/test/test_cookielib.py
+++ b/Lib/test/test_cookielib.py
@@ -368,6 +368,7 @@ class CookieTests(TestCase):
("http://foo.bar.com/", ".foo.bar.com", True),
("http://foo.bar.com/", "foo.bar.com", True),
("http://foo.bar.com/", ".bar.com", True),
+ ("http://foo.bar.com/", "bar.com", True),
("http://foo.bar.com/", "com", True),
("http://foo.com/", "rhubarb.foo.com", False),
("http://foo.com/", ".foo.com", True),
@@ -378,6 +379,8 @@ class CookieTests(TestCase):
("http://foo/", "foo", True),
("http://foo/", "foo.local", True),
("http://foo/", ".local", True),
+ ("http://barfoo.com", ".foo.com", False),
+ ("http://barfoo.com", "foo.com", False),
]:
request = urllib2.Request(url)
r = pol.domain_return_ok(domain, request)
@@ -938,6 +941,33 @@ class CookieTests(TestCase):
c.add_cookie_header(req)
self.assertFalse(req.has_header("Cookie"))
+ c.clear()
+
+ pol.set_blocked_domains([])
+ req = Request("http://acme.com/")
+ res = FakeResponse(headers, "http://acme.com/")
+ cookies = c.make_cookies(res, req)
+ c.extract_cookies(res, req)
+ self.assertEqual(len(c), 1)
+
+ req = Request("http://acme.com/")
+ c.add_cookie_header(req)
+ self.assertTrue(req.has_header("Cookie"))
+
+ req = Request("http://badacme.com/")
+ c.add_cookie_header(req)
+ self.assertFalse(pol.return_ok(cookies[0], req))
+ self.assertFalse(req.has_header("Cookie"))
+
+ p = pol.set_blocked_domains(["acme.com"])
+ req = Request("http://acme.com/")
+ c.add_cookie_header(req)
+ self.assertFalse(req.has_header("Cookie"))
+
+ req = Request("http://badacme.com/")
+ c.add_cookie_header(req)
+ self.assertFalse(req.has_header("Cookie"))
+
def test_secure(self):
from cookielib import CookieJar, DefaultCookiePolicy
diff --git a/Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst b/Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst
new file mode 100644
index 0000000000..7725180616
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2019-05-20-00-35-12.bpo-35121.RRi-HU.rst
@@ -0,0 +1,4 @@
+Don't send cookies of domain A without Domain attribute to domain B when
+domain A is a suffix match of domain B while using a cookiejar with
+:class:`cookielib.DefaultCookiePolicy` policy. Patch by Karthikeyan
+Singaravelan.
--
2.20.1
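
As with the other dropped backports, the bpo-35121 fix above is included in 2.7.17. A small sketch of the behaviour its test table describes (illustrative only):

    # Per the removed tests: ".bar.com" still domain-matches a request to
    # foo.bar.com, but ".foo.com" must not match the unrelated barfoo.com.
    import cookielib, urllib2

    pol = cookielib.DefaultCookiePolicy()
    print pol.domain_return_ok(".bar.com", urllib2.Request("http://foo.bar.com/"))  # expected: True
    print pol.domain_return_ok(".foo.com", urllib2.Request("http://barfoo.com/"))   # expected: False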

+0 -2  lang/python/python/patches/021-compileall-add-recursion-option.patch

@@ -1,5 +1,3 @@
-diff --git a/Lib/compileall.py b/Lib/compileall.py
-index 5cfa8bed3f..8716c9c0ca 100644
 --- a/Lib/compileall.py
 +++ b/Lib/compileall.py
 @@ -152,10 +152,10 @@ def main():


+0 -365  lang/python/python/patches/022-bpo-30458-Disallow-control-chars-in-http-URLs-GH-13315.patch

@@ -1,365 +0,0 @@
From 8af9afdb2938f3d993eaea549c7bc5fbe75bb7e7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Miro=20Hron=C4=8Dok?= <miro@hroncok.cz>
Date: Tue, 7 May 2019 17:28:47 +0200
Subject: [PATCH 1/2] bpo-30458: Disallow control chars in http URLs.
(GH-12755) (GH-13154)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Disallow control chars in http URLs in urllib2.urlopen. This
addresses a potential security problem for applications that do not
sanity check their URLs where http request headers could be injected.
Disable https related urllib tests on a build without ssl (GH-13032)
These tests require an SSL enabled build. Skip these tests when
python is built without SSL to fix test failures.
Use httplib.InvalidURL instead of ValueError as the new error case's
exception. (GH-13044)
Backport Co-Authored-By: Miro Hrončok <miro@hroncok.cz>
(cherry picked from commit 7e200e0763f5b71c199aaf98bd5588f291585619)
Notes on backport to Python 2.7:
* test_urllib tests urllib.urlopen() which quotes the URL and so is
not vulerable to HTTP Header Injection.
* Add tests to test_urllib2 on urllib2.urlopen().
* Reject non-ASCII characters: range 0x80-0xff.
---
Lib/httplib.py | 16 +++++
Lib/test/test_urllib.py | 31 +++++++++
Lib/test/test_urllib2.py | 63 ++++++++++++++++---
Lib/test/test_xmlrpc.py | 8 ++-
.../2019-04-10-08-53-30.bpo-30458.51E-DA.rst | 1 +
5 files changed, 111 insertions(+), 8 deletions(-)
create mode 100644 Misc/NEWS.d/next/Security/2019-04-10-08-53-30.bpo-30458.51E-DA.rst
diff --git a/Lib/httplib.py b/Lib/httplib.py
index 60a8fb4e355f..1b41c346e090 100644
--- a/Lib/httplib.py
+++ b/Lib/httplib.py
@@ -247,6 +247,16 @@
_is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match
_is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search
+# These characters are not allowed within HTTP URL paths.
+# See https://tools.ietf.org/html/rfc3986#section-3.3 and the
+# https://tools.ietf.org/html/rfc3986#appendix-A pchar definition.
+# Prevents CVE-2019-9740. Includes control characters such as \r\n.
+# Restrict non-ASCII characters above \x7f (0x80-0xff).
+_contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f-\xff]')
+# Arguably only these _should_ allowed:
+# _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")
+# We are more lenient for assumed real world compatibility purposes.
+
# We always set the Content-Length header for these methods because some
# servers will otherwise respond with a 411
_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
@@ -927,6 +937,12 @@ def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
self._method = method
if not url:
url = '/'
+ # Prevent CVE-2019-9740.
+ match = _contains_disallowed_url_pchar_re.search(url)
+ if match:
+ raise InvalidURL("URL can't contain control characters. %r "
+ "(found at least %r)"
+ % (url, match.group()))
hdr = '%s %s %s' % (method, url, self._http_vsn_str)
self._output(hdr)
diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
index 1ce9201c0693..bdc6e78f8678 100644
--- a/Lib/test/test_urllib.py
+++ b/Lib/test/test_urllib.py
@@ -9,6 +9,10 @@
import sys
import mimetools
import tempfile
+try:
+ import ssl
+except ImportError:
+ ssl = None
from test import test_support
from base64 import b64encode
@@ -257,6 +261,33 @@ def test_url_fragment(self):
finally:
self.unfakehttp()
+ @unittest.skipUnless(ssl, "ssl module required")
+ def test_url_with_control_char_rejected(self):
+ for char_no in range(0, 0x21) + range(0x7f, 0x100):
+ char = chr(char_no)
+ schemeless_url = "//localhost:7777/test%s/" % char
+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
+ try:
+ # urllib quotes the URL so there is no injection.
+ resp = urllib.urlopen("http:" + schemeless_url)
+ self.assertNotIn(char, resp.geturl())
+ finally:
+ self.unfakehttp()
+
+ @unittest.skipUnless(ssl, "ssl module required")
+ def test_url_with_newline_header_injection_rejected(self):
+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
+ host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
+ schemeless_url = "//" + host + ":8080/test/?test=a"
+ try:
+ # urllib quotes the URL so there is no injection.
+ resp = urllib.urlopen("http:" + schemeless_url)
+ self.assertNotIn(' ', resp.geturl())
+ self.assertNotIn('\r', resp.geturl())
+ self.assertNotIn('\n', resp.geturl())
+ finally:
+ self.unfakehttp()
+
def test_read_bogus(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp('''HTTP/1.1 401 Authentication Required
diff --git a/Lib/test/test_urllib2.py b/Lib/test/test_urllib2.py
index 6d24d5ddf83c..d13f86f68bae 100644
--- a/Lib/test/test_urllib2.py
+++ b/Lib/test/test_urllib2.py
@@ -1,5 +1,5 @@
import unittest
-from test import test_support
+from test import support
from test import test_urllib
import os
@@ -15,6 +15,9 @@
except ImportError:
ssl = None
+from test.test_urllib import FakeHTTPMixin
+
+
# XXX
# Request
# CacheFTPHandler (hard to write)
@@ -683,7 +686,7 @@ def test_file(self):
h = urllib2.FileHandler()
o = h.parent = MockOpener()
- TESTFN = test_support.TESTFN
+ TESTFN = support.TESTFN
urlpath = sanepathname2url(os.path.abspath(TESTFN))
towrite = "hello, world\n"
urls = [
@@ -1154,7 +1157,7 @@ def test_basic_auth_with_unquoted_realm(self):
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
msg = "Basic Auth Realm was unquoted"
- with test_support.check_warnings((msg, UserWarning)):
+ with support.check_warnings((msg, UserWarning)):
self._test_basic_auth(opener, auth_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
@@ -1262,7 +1265,7 @@ def _test_basic_auth(self, opener, auth_handler, auth_header,
self.assertEqual(len(http_handler.requests), 1)
self.assertFalse(http_handler.requests[0].has_header(auth_header))
-class MiscTests(unittest.TestCase):
+class MiscTests(unittest.TestCase, FakeHTTPMixin):
def test_build_opener(self):
class MyHTTPHandler(urllib2.HTTPHandler): pass
@@ -1317,6 +1320,52 @@ def test_unsupported_algorithm(self):
"Unsupported digest authentication algorithm 'invalid'"
)
+ @unittest.skipUnless(ssl, "ssl module required")
+ def test_url_with_control_char_rejected(self):
+ for char_no in range(0, 0x21) + range(0x7f, 0x100):
+ char = chr(char_no)
+ schemeless_url = "//localhost:7777/test%s/" % char
+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
+ try:
+ # We explicitly test urllib.request.urlopen() instead of the top
+ # level 'def urlopen()' function defined in this... (quite ugly)
+ # test suite. They use different url opening codepaths. Plain
+ # urlopen uses FancyURLOpener which goes via a codepath that
+ # calls urllib.parse.quote() on the URL which makes all of the
+ # above attempts at injection within the url _path_ safe.
+ escaped_char_repr = repr(char).replace('\\', r'\\')
+ InvalidURL = httplib.InvalidURL
+ with self.assertRaisesRegexp(
+ InvalidURL, "contain control.*" + escaped_char_repr):
+ urllib2.urlopen("http:" + schemeless_url)
+ with self.assertRaisesRegexp(
+ InvalidURL, "contain control.*" + escaped_char_repr):
+ urllib2.urlopen("https:" + schemeless_url)
+ finally:
+ self.unfakehttp()
+
+ @unittest.skipUnless(ssl, "ssl module required")
+ def test_url_with_newline_header_injection_rejected(self):
+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
+ host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
+ schemeless_url = "//" + host + ":8080/test/?test=a"
+ try:
+ # We explicitly test urllib.request.urlopen() instead of the top
+ # level 'def urlopen()' function defined in this... (quite ugly)
+ # test suite. They use different url opening codepaths. Plain
+ # urlopen uses FancyURLOpener which goes via a codepath that
+ # calls urllib.parse.quote() on the URL which makes all of the
+ # above attempts at injection within the url _path_ safe.
+ InvalidURL = httplib.InvalidURL
+ with self.assertRaisesRegexp(
+ InvalidURL, r"contain control.*\\r.*(found at least . .)"):
+ urllib2.urlopen("http:" + schemeless_url)
+ with self.assertRaisesRegexp(InvalidURL, r"contain control.*\\n"):
+ urllib2.urlopen("https:" + schemeless_url)
+ finally:
+ self.unfakehttp()
+
+
class RequestTests(unittest.TestCase):
@@ -1412,14 +1461,14 @@ def test_HTTPError_interface_call(self):
def test_main(verbose=None):
from test import test_urllib2
- test_support.run_doctest(test_urllib2, verbose)
- test_support.run_doctest(urllib2, verbose)
+ support.run_doctest(test_urllib2, verbose)
+ support.run_doctest(urllib2, verbose)
tests = (TrivialTests,
OpenerDirectorTests,
HandlerTests,
MiscTests,
RequestTests)
- test_support.run_unittest(*tests)
+ support.run_unittest(*tests)
if __name__ == "__main__":
test_main(verbose=True)
diff --git a/Lib/test/test_xmlrpc.py b/Lib/test/test_xmlrpc.py
index 36b3be67fd6b..90ccb30716ff 100644
--- a/Lib/test/test_xmlrpc.py
+++ b/Lib/test/test_xmlrpc.py
@@ -659,7 +659,13 @@ def test_dotted_attribute(self):
def test_partial_post(self):
# Check that a partial POST doesn't make the server loop: issue #14001.
conn = httplib.HTTPConnection(ADDR, PORT)
- conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye')
+ conn.send('POST /RPC2 HTTP/1.0\r\n'
+ 'Content-Length: 100\r\n\r\n'
+ 'bye HTTP/1.1\r\n'
+ 'Host: %s:%s\r\n'
+ 'Accept-Encoding: identity\r\n'
+ 'Content-Length: 0\r\n\r\n'
+ % (ADDR, PORT))
conn.close()
class SimpleServerEncodingTestCase(BaseServerTestCase):
diff --git a/Misc/NEWS.d/next/Security/2019-04-10-08-53-30.bpo-30458.51E-DA.rst b/Misc/NEWS.d/next/Security/2019-04-10-08-53-30.bpo-30458.51E-DA.rst
new file mode 100644
index 000000000000..47cb899df1af
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2019-04-10-08-53-30.bpo-30458.51E-DA.rst
@@ -0,0 +1 @@
+Address CVE-2019-9740 by disallowing URL paths with embedded whitespace or control characters through into the underlying http client request. Such potentially malicious header injection URLs now cause an httplib.InvalidURL exception to be raised.
From 9f8ae2a2e4b836fe3136e84e55b8de62cb40904f Mon Sep 17 00:00:00 2001
From: Victor Stinner <vstinner@redhat.com>
Date: Mon, 20 May 2019 16:53:15 +0200
Subject: [PATCH 2/2] Address Gregory's comments
---
Lib/test/test_urllib.py | 6 ------
Lib/test/test_urllib2.py | 14 +++++++-------
2 files changed, 7 insertions(+), 13 deletions(-)
diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
index bdc6e78f8678..d7778d4194f3 100644
--- a/Lib/test/test_urllib.py
+++ b/Lib/test/test_urllib.py
@@ -9,10 +9,6 @@
import sys
import mimetools
import tempfile
-try:
- import ssl
-except ImportError:
- ssl = None
from test import test_support
from base64 import b64encode
@@ -261,7 +257,6 @@ def test_url_fragment(self):
finally:
self.unfakehttp()
- @unittest.skipUnless(ssl, "ssl module required")
def test_url_with_control_char_rejected(self):
for char_no in range(0, 0x21) + range(0x7f, 0x100):
char = chr(char_no)
@@ -274,7 +269,6 @@ def test_url_with_control_char_rejected(self):
finally:
self.unfakehttp()
- @unittest.skipUnless(ssl, "ssl module required")
def test_url_with_newline_header_injection_rejected(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
diff --git a/Lib/test/test_urllib2.py b/Lib/test/test_urllib2.py
index d13f86f68bae..9531818e16b2 100644
--- a/Lib/test/test_urllib2.py
+++ b/Lib/test/test_urllib2.py
@@ -1,5 +1,5 @@
import unittest
-from test import support
+from test import test_support
from test import test_urllib
import os
@@ -686,7 +686,7 @@ def test_file(self):
h = urllib2.FileHandler()
o = h.parent = MockOpener()
- TESTFN = support.TESTFN
+ TESTFN = test_support.TESTFN
urlpath = sanepathname2url(os.path.abspath(TESTFN))
towrite = "hello, world\n"
urls = [
@@ -1157,7 +1157,7 @@ def test_basic_auth_with_unquoted_realm(self):
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
msg = "Basic Auth Realm was unquoted"
- with support.check_warnings((msg, UserWarning)):
+ with test_support.check_warnings((msg, UserWarning)):
self._test_basic_auth(opener, auth_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
@@ -1350,7 +1350,7 @@ def test_url_with_newline_header_injection_rejected(self):
host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
schemeless_url = "//" + host + ":8080/test/?test=a"
try:
- # We explicitly test urllib.request.urlopen() instead of the top
+ # We explicitly test urllib2.urlopen() instead of the top
# level 'def urlopen()' function defined in this... (quite ugly)
# test suite. They use different url opening codepaths. Plain
# urlopen uses FancyURLOpener which goes via a codepath that
@@ -1461,14 +1461,14 @@ def test_HTTPError_interface_call(self):
def test_main(verbose=None):
from test import test_urllib2
- support.run_doctest(test_urllib2, verbose)
- support.run_doctest(urllib2, verbose)
+ test_support.run_doctest(test_urllib2, verbose)
+ test_support.run_doctest(urllib2, verbose)
tests = (TrivialTests,
OpenerDirectorTests,
HandlerTests,
MiscTests,
RequestTests)
- support.run_unittest(*tests)
+ test_support.run_unittest(*tests)
if __name__ == "__main__":
test_main(verbose=True)
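
A short sketch of the CVE-2019-9740 behaviour covered by the removed patch, assuming a 2.7.17 interpreter; the check fires in putrequest(), before any connection is attempted, so the hypothetical host below is never contacted:

    # Control characters in the request target are rejected with
    # httplib.InvalidURL instead of being written into the request line.
    import urllib2, httplib

    try:
        urllib2.urlopen("http://localhost:7777/test\r\nX-injected: header/")
    except httplib.InvalidURL as exc:
        print "rejected:", exc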

+0 -192  lang/python/python/patches/023-bpo-35907-Avoid-file-reading-as-disallowing-the-unnecessary-URL-scheme-in-urllib-GH-11842.patch

@@ -1,192 +0,0 @@
From 8f99cc799e4393bf1112b9395b2342f81b3f45ef Mon Sep 17 00:00:00 2001
From: push0ebp <push0ebp@shl-MacBook-Pro.local>
Date: Thu, 14 Feb 2019 02:05:46 +0900
Subject: [PATCH 1/6] bpo-35907: Avoid file reading as disallowing the
unnecessary URL scheme in urllib
---
Lib/test/test_urllib.py | 12 ++++++++++++
Lib/urllib.py | 5 ++++-
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
index 1ce9201c0693..e5f210e62a18 100644
--- a/Lib/test/test_urllib.py
+++ b/Lib/test/test_urllib.py
@@ -1023,6 +1023,18 @@ def open_spam(self, url):
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
+ def test_local_file_open(self):
+ class DummyURLopener(urllib.URLopener):
+ def open_local_file(self, url):
+ return url
+ self.assertEqual(DummyURLopener().open(
+ 'local-file://example'), '//example')
+ self.assertEqual(DummyURLopener().open(
+ 'local_file://example'), '//example')
+ self.assertRaises(IOError, urllib.urlopen,
+ 'local-file://example')
+ self.assertRaises(IOError, urllib.urlopen,
+ 'local_file://example')
# Just commented them out.
# Can't really tell why keep failing in windows and sparc.
diff --git a/Lib/urllib.py b/Lib/urllib.py
index d85504a5cb7e..a24e9a5c68fb 100644
--- a/Lib/urllib.py
+++ b/Lib/urllib.py
@@ -203,7 +203,10 @@ def open(self, fullurl, data=None):
name = 'open_' + urltype
self.type = urltype
name = name.replace('-', '_')
- if not hasattr(self, name):
+
+ # bpo-35907: # disallow the file reading with the type not allowed
+ if not hasattr(self, name) or \
+ (self == _urlopener and name == 'open_local_file'):
if proxy:
return self.open_unknown_proxy(proxy, fullurl, data)
else:
From b86392511acd4cd30dc68711fa22f9f93228715a Mon Sep 17 00:00:00 2001
From: "blurb-it[bot]" <blurb-it[bot]@users.noreply.github.com>
Date: Wed, 13 Feb 2019 17:21:11 +0000
Subject: [PATCH 2/6] =?UTF-8?q?=F0=9F=93=9C=F0=9F=A4=96=20Added=20by=20blu?=
=?UTF-8?q?rb=5Fit.?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../NEWS.d/next/Library/2019-02-13-17-21-10.bpo-35907.ckk2zg.rst | 1 +
1 file changed, 1 insertion(+)
create mode 100644 Misc/NEWS.d/next/Library/2019-02-13-17-21-10.bpo-35907.ckk2zg.rst
diff --git a/Misc/NEWS.d/next/Library/2019-02-13-17-21-10.bpo-35907.ckk2zg.rst b/Misc/NEWS.d/next/Library/2019-02-13-17-21-10.bpo-35907.ckk2zg.rst
new file mode 100644
index 000000000000..8118a5f40583
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2019-02-13-17-21-10.bpo-35907.ckk2zg.rst
@@ -0,0 +1 @@
+Avoid file reading as disallowing the unnecessary URL scheme in urllib.urlopen
\ No newline at end of file
From f20a31c7364fecdd3197e0180a5857e23aa15065 Mon Sep 17 00:00:00 2001
From: SH <push0ebp@gmail.com>
Date: Fri, 17 May 2019 02:31:18 +0900
Subject: [PATCH 3/6] Update 2019-02-13-17-21-10.bpo-35907.ckk2zg.rst
Add prefix "CVE-2019-9948: "
---
.../next/Library/2019-02-13-17-21-10.bpo-35907.ckk2zg.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Misc/NEWS.d/next/Library/2019-02-13-17-21-10.bpo-35907.ckk2zg.rst b/Misc/NEWS.d/next/Library/2019-02-13-17-21-10.bpo-35907.ckk2zg.rst
index 8118a5f40583..bb187d8d65a5 100644
--- a/Misc/NEWS.d/next/Library/2019-02-13-17-21-10.bpo-35907.ckk2zg.rst
+++ b/Misc/NEWS.d/next/Library/2019-02-13-17-21-10.bpo-35907.ckk2zg.rst
@@ -1 +1 @@
-Avoid file reading as disallowing the unnecessary URL scheme in urllib.urlopen
\ No newline at end of file
+CVE-2019-9948: Avoid file reading as disallowing the unnecessary URL scheme in urllib.urlopen
From 179a5f75f1121dab271fe8f90eb35145f9dcbbda Mon Sep 17 00:00:00 2001
From: Sihoon Lee <push0ebp@gmail.com>
Date: Fri, 17 May 2019 02:41:06 +0900
Subject: [PATCH 4/6] Update test_urllib.py and urllib.py\nchange assertEqual
into assertRasies in DummyURLopener test, and simplify mitigation
---
Lib/test/test_urllib.py | 11 +++--------
Lib/urllib.py | 4 ++--
2 files changed, 5 insertions(+), 10 deletions(-)
diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
index e5f210e62a18..1e23dfb0bb16 100644
--- a/Lib/test/test_urllib.py
+++ b/Lib/test/test_urllib.py
@@ -1027,14 +1027,9 @@ def test_local_file_open(self):
class DummyURLopener(urllib.URLopener):
def open_local_file(self, url):
return url
- self.assertEqual(DummyURLopener().open(
- 'local-file://example'), '//example')
- self.assertEqual(DummyURLopener().open(
- 'local_file://example'), '//example')
- self.assertRaises(IOError, urllib.urlopen,
- 'local-file://example')
- self.assertRaises(IOError, urllib.urlopen,
- 'local_file://example')
+ for url in ('local_file://example', 'local-file://example'):
+ self.assertRaises(IOError, DummyURLopener().open, url)
+ self.assertRaises(IOError, urllib.urlopen, url)
# Just commented them out.
# Can't really tell why keep failing in windows and sparc.
diff --git a/Lib/urllib.py b/Lib/urllib.py
index a24e9a5c68fb..39b834054e9e 100644
--- a/Lib/urllib.py
+++ b/Lib/urllib.py
@@ -203,10 +203,10 @@ def open(self, fullurl, data=None):
name = 'open_' + urltype
self.type = urltype
name = name.replace('-', '_')
-
+
# bpo-35907: # disallow the file reading with the type not allowed
if not hasattr(self, name) or \
- (self == _urlopener and name == 'open_local_file'):
+ getattr(self, name) == self.open_local_file:
if proxy:
return self.open_unknown_proxy(proxy, fullurl, data)
else:
From 3cda03c00109f9c1ae0df1760ecd60915cef105e Mon Sep 17 00:00:00 2001
From: SH <push0ebp@gmail.com>
Date: Tue, 21 May 2019 22:21:15 +0900
Subject: [PATCH 5/6] Update urllib.py
Modify the object to string in check method name.
---
Lib/urllib.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/Lib/urllib.py b/Lib/urllib.py
index 39b834054e9e..0bf5f4d5a21b 100644
--- a/Lib/urllib.py
+++ b/Lib/urllib.py
@@ -205,8 +205,7 @@ def open(self, fullurl, data=None):
name = name.replace('-', '_')
# bpo-35907: # disallow the file reading with the type not allowed
- if not hasattr(self, name) or \
- getattr(self, name) == self.open_local_file:
+ if not hasattr(self, name) or name == 'open_local_file':
if proxy:
return self.open_unknown_proxy(proxy, fullurl, data)
else:
From 8b7d7abff8c633e29a8f10bbf9cc7d9e656b0eec Mon Sep 17 00:00:00 2001
From: SH <push0ebp@gmail.com>
Date: Wed, 22 May 2019 03:48:56 +0900
Subject: [PATCH 6/6] Update urllib.py
Fix typo
---
Lib/urllib.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Lib/urllib.py b/Lib/urllib.py
index 0bf5f4d5a21b..156879dd0a14 100644
--- a/Lib/urllib.py
+++ b/Lib/urllib.py
@@ -204,7 +204,7 @@ def open(self, fullurl, data=None):
self.type = urltype
name = name.replace('-', '_')
- # bpo-35907: # disallow the file reading with the type not allowed
+ # bpo-35907: disallow the file reading with the type not allowed
if not hasattr(self, name) or name == 'open_local_file':
if proxy:
return self.open_unknown_proxy(proxy, fullurl, data)
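
Illustration of the bpo-35907 / CVE-2019-9948 behaviour that ships in 2.7.17 (hence the removal above): urlopen() now refuses scheme names that would alias the local-file opener. Sketch only, not part of the diff:

    # 'local_file://' and 'local-file://' both map to open_local_file and are
    # rejected; the expected result is an IOError about an unknown url type.
    import urllib

    for url in ("local_file://example", "local-file://example"):
        try:
            urllib.urlopen(url)
        except IOError as exc:
            print "blocked:", url, exc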

+0 -159  lang/python/python/patches/027-bpo-38243-Escape-the-server-title-of-DocXMLRPCServer.patch

@@ -1,159 +0,0 @@
From b41cde823d026f2adc21ef14b1c2e92b1006de06 Mon Sep 17 00:00:00 2001
From: Dong-hee Na <donghee.na92@gmail.com>
Date: Sat, 28 Sep 2019 10:17:25 +0900
Subject: [PATCH 1/3] [2.7] bpo-38243: Escape the server title of
DocXMLRPCServer when rendering
---
Lib/DocXMLRPCServer.py | 10 +++++++++-
Lib/test/test_docxmlrpc.py | 20 +++++++++++++++++++
.../2019-09-25-13-21-09.bpo-38243.1pfz24.rst | 3 +++
3 files changed, 32 insertions(+), 1 deletion(-)
create mode 100644 Misc/NEWS.d/next/Security/2019-09-25-13-21-09.bpo-38243.1pfz24.rst
diff --git a/Lib/DocXMLRPCServer.py b/Lib/DocXMLRPCServer.py
index 4064ec2e48d4d..a0e407b6318ad 100644
--- a/Lib/DocXMLRPCServer.py
+++ b/Lib/DocXMLRPCServer.py
@@ -210,7 +210,15 @@ def generate_html_documentation(self):
methods
)
- return documenter.page(self.server_title, documentation)
+ escape_table = {
+ "&": "&amp;",
+ '"': "&quot;",
+ "'": "&#x27;",
+ ">": "&gt;",
+ "<": "&lt;",
+ }
+ title = ''.join(escape_table.get(c, c) for c in self.server_title)
+ return documenter.page(title, documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
"""XML-RPC and documentation request handler class.
diff --git a/Lib/test/test_docxmlrpc.py b/Lib/test/test_docxmlrpc.py
index 4dff4159e2466..c45b892b8b3e7 100644
--- a/Lib/test/test_docxmlrpc.py
+++ b/Lib/test/test_docxmlrpc.py
@@ -1,5 +1,6 @@
from DocXMLRPCServer import DocXMLRPCServer
import httplib
+import re
import sys
from test import test_support
threading = test_support.import_module('threading')
@@ -176,6 +177,25 @@ def test_autolink_dotted_methods(self):
self.assertIn("""Try&nbsp;self.<strong>add</strong>,&nbsp;too.""",
response.read())
+ def test_server_title_escape(self):
+ """Test that the server title and documentation
+ are escaped for HTML.
+ """
+ self.serv.set_server_title('test_title<script>')
+ self.serv.set_server_documentation('test_documentation<script>')
+ self.assertEqual('test_title<script>', self.serv.server_title)
+ self.assertEqual('test_documentation<script>',
+ self.serv.server_documentation)
+
+ generated = self.serv.generate_html_documentation()
+ title = re.search(r'<title>(.+?)</title>', generated).group()
+ documentation = re.search(r'<p><tt>(.+?)</tt></p>', generated).group()
+ self.assertEqual('<title>Python: test_title&lt;script&gt;</title>',
+ title)
+ self.assertEqual('<p><tt>test_documentation&lt;script&gt;</tt></p>',
+ documentation)
+
+
def test_main():
test_support.run_unittest(DocXMLRPCHTTPGETServer)
diff --git a/Misc/NEWS.d/next/Security/2019-09-25-13-21-09.bpo-38243.1pfz24.rst b/Misc/NEWS.d/next/Security/2019-09-25-13-21-09.bpo-38243.1pfz24.rst
new file mode 100644
index 0000000000000..8f02baed9ebe5
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2019-09-25-13-21-09.bpo-38243.1pfz24.rst
@@ -0,0 +1,3 @@
+Escape the server title of :class:`DocXMLRPCServer.DocXMLRPCServer`
+when rendering the document page as HTML.
+(Contributed by Dong-hee Na in :issue:`38243`.)
From 00251ae0244cfae1f5a77d15f3d0415c12b65ada Mon Sep 17 00:00:00 2001
From: Dong-hee Na <donghee.na92@gmail.com>
Date: Tue, 1 Oct 2019 09:31:33 +0900
Subject: [PATCH 2/3] bpo-38243:Refect victor's review
---
Lib/DocXMLRPCServer.py | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/Lib/DocXMLRPCServer.py b/Lib/DocXMLRPCServer.py
index a0e407b6318ad..6ab41c31b403e 100644
--- a/Lib/DocXMLRPCServer.py
+++ b/Lib/DocXMLRPCServer.py
@@ -20,6 +20,17 @@
CGIXMLRPCRequestHandler,
resolve_dotted_attribute)
+
+def _html_escape_quote(s, quote=True):
+ s = s.replace("&", "&amp;") # Must be done first!
+ s = s.replace("<", "&lt;")
+ s = s.replace(">", "&gt;")
+ if quote:
+ s = s.replace('"', "&quot;")
+ s = s.replace('\'', "&#x27;")
+ return s
+
+
class ServerHTMLDoc(pydoc.HTMLDoc):
"""Class used to generate pydoc HTML document for a server"""
@@ -210,14 +221,7 @@ def generate_html_documentation(self):
methods
)
- escape_table = {
- "&": "&amp;",
- '"': "&quot;",
- "'": "&#x27;",
- ">": "&gt;",
- "<": "&lt;",
- }
- title = ''.join(escape_table.get(c, c) for c in self.server_title)
+ title = _html_escape_quote(self.server_title)
return documenter.page(title, documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
From 09b17d8230a24586e417d52c332058f541d47999 Mon Sep 17 00:00:00 2001
From: Dong-hee Na <donghee.na92@gmail.com>
Date: Tue, 1 Oct 2019 19:35:34 +0900
Subject: [PATCH 3/3] bpo-38243: Update
---
Lib/DocXMLRPCServer.py | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/Lib/DocXMLRPCServer.py b/Lib/DocXMLRPCServer.py
index 6ab41c31b403e..90b037dd35d6b 100644
--- a/Lib/DocXMLRPCServer.py
+++ b/Lib/DocXMLRPCServer.py
@@ -21,13 +21,12 @@
resolve_dotted_attribute)
-def _html_escape_quote(s, quote=True):
+def _html_escape_quote(s):
s = s.replace("&", "&amp;") # Must be done first!
s = s.replace("<", "&lt;")
s = s.replace(">", "&gt;")
- if quote:
- s = s.replace('"', "&quot;")
- s = s.replace('\'', "&#x27;")
+ s = s.replace('"', "&quot;")
+ s = s.replace('\'', "&#x27;")
return s
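
A sketch of the bpo-38243 behaviour (upstream in 2.7.17, hence the removal above); it binds an ephemeral local port only to render the documentation page:

    # The server title is HTML-escaped before being embedded in the generated page.
    from DocXMLRPCServer import DocXMLRPCServer

    serv = DocXMLRPCServer(("localhost", 0), logRequests=False)
    serv.set_server_title("test_title<script>")
    html = serv.generate_html_documentation()
    print "test_title&lt;script&gt;" in html  # expected: True (escaped)
    print "test_title<script>" in html        # expected: False (raw title absent)
    serv.server_close()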

+0 -80  lang/python/python/patches/028-bpo-34155-Dont-parse-domains-containing-GH-13079.patch

@@ -1,80 +0,0 @@
From c2828900ec85e1e2957016e1e078de3a9677a963 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roberto=20C=2E=20S=C3=A1nchez?= <roberto@connexer.com>
Date: Tue, 10 Sep 2019 21:48:34 -0400
Subject: [PATCH] [2.7] bpo-34155: Dont parse domains containing @ (GH-13079)
https://bugs.python.org/issue34155
(cherry picked from commit 8cb65d1381b027f0b09ee36bfed7f35bb4dec9a9)
Excludes changes to Lib/email/_header_value_parser.py, which did not
exist in 2.7.
Co-authored-by: jpic <jpic@users.noreply.github.com>
---
Lib/email/_parseaddr.py | 11 ++++++++++-
Lib/email/test/test_email.py | 14 ++++++++++++++
.../2019-05-04-13-33-37.bpo-34155.MJll68.rst | 1 +
3 files changed, 25 insertions(+), 1 deletion(-)
create mode 100644 Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst
diff --git a/Lib/email/_parseaddr.py b/Lib/email/_parseaddr.py
index 690db2c22d34d..dc49d2e45a5eb 100644
--- a/Lib/email/_parseaddr.py
+++ b/Lib/email/_parseaddr.py
@@ -336,7 +336,12 @@ def getaddrspec(self):
aslist.append('@')
self.pos += 1
self.gotonext()
- return EMPTYSTRING.join(aslist) + self.getdomain()
+ domain = self.getdomain()
+ if not domain:
+ # Invalid domain, return an empty address instead of returning a
+ # local part to denote failed parsing.
+ return EMPTYSTRING
+ return EMPTYSTRING.join(aslist) + domain
def getdomain(self):
"""Get the complete domain name from an address."""
@@ -351,6 +356,10 @@ def getdomain(self):
elif self.field[self.pos] == '.':
self.pos += 1
sdlist.append('.')
+ elif self.field[self.pos] == '@':
+ # bpo-34155: Don't parse domains with two `@` like
+ # `a@malicious.org@important.com`.
+ return EMPTYSTRING
elif self.field[self.pos] in self.atomends:
break
else:
diff --git a/Lib/email/test/test_email.py b/Lib/email/test/test_email.py
index 4b4dee3d34644..2efe44ac5a73f 100644
--- a/Lib/email/test/test_email.py
+++ b/Lib/email/test/test_email.py
@@ -2306,6 +2306,20 @@ def test_parseaddr_empty(self):
self.assertEqual(Utils.parseaddr('<>'), ('', ''))
self.assertEqual(Utils.formataddr(Utils.parseaddr('<>')), '')
+ def test_parseaddr_multiple_domains(self):
+ self.assertEqual(
+ Utils.parseaddr('a@b@c'),
+ ('', '')
+ )
+ self.assertEqual(
+ Utils.parseaddr('a@b.c@c'),
+ ('', '')
+ )
+ self.assertEqual(
+ Utils.parseaddr('a@172.17.0.1@c'),
+ ('', '')
+ )
+
def test_noquote_dump(self):
self.assertEqual(
Utils.formataddr(('A Silly Person', 'person@dom.ain')),
diff --git a/Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst b/Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst
new file mode 100644
index 0000000000000..50292e29ed1d2
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2019-05-04-13-33-37.bpo-34155.MJll68.rst
@@ -0,0 +1 @@
+Fix parsing of invalid email addresses with more than one ``@`` (e.g. a@b@c.com.) to not return the part before 2nd ``@`` as valid email address. Patch by maxking & jpic.
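
Finally, the bpo-34155 change above is also part of 2.7.17. An illustrative check (not part of the diff; the second address is a made-up example):

    # Addresses with a second '@' no longer yield a plausible-looking result.
    from email.utils import parseaddr

    print parseaddr("a@malicious.org@important.com")  # expected: ('', '')
    print parseaddr("alice@example.com")              # expected: ('', 'alice@example.com')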
