author     gutteridge <gutteridge@pkgsrc.org>   2021-10-10 03:00:59 +0000
committer  gutteridge <gutteridge@pkgsrc.org>   2021-10-10 03:00:59 +0000
commit     e26fd4471d4aa9dec5c33903b1f03f684786c986 (patch)
tree       d8dd557f209951d02c3d1fcd3820adef53a030c9 /lang/python27
parent     6f77a72fdb38fb346526ab86b1ac5ceaa38d29bf (diff)
download   pkgsrc-e26fd4471d4aa9dec5c33903b1f03f684786c986.tar.gz
python27: fix various security issues
Addresses CVE-2020-27619, CVE-2021-3177, CVE-2021-3733, CVE-2021-3737 and CVE-2021-23336. Patches mostly sourced via Fedora.
Diffstat (limited to 'lang/python27')
-rw-r--r--  lang/python27/Makefile                                           |   4
-rw-r--r--  lang/python27/distinfo                                           |  18
-rw-r--r--  lang/python27/patches/patch-Doc_library_cgi.rst                  |  29
-rw-r--r--  lang/python27/patches/patch-Doc_library_urlparse.rst             |  51
-rw-r--r--  lang/python27/patches/patch-Lib_cgi.py                           | 128
-rw-r--r--  lang/python27/patches/patch-Lib_ctypes_test_test__parameters.py  |  58
-rw-r--r--  lang/python27/patches/patch-Lib_httplib.py                       |  58
-rw-r--r--  lang/python27/patches/patch-Lib_test_multibytecodec__support.py  |  46
-rw-r--r--  lang/python27/patches/patch-Lib_test_test__cgi.py                |  91
-rw-r--r--  lang/python27/patches/patch-Lib_test_test__httplib.py            |  23
-rw-r--r--  lang/python27/patches/patch-Lib_test_test__urlparse.py           | 265
-rw-r--r--  lang/python27/patches/patch-Lib_urllib2.py                       |  11
-rw-r--r--  lang/python27/patches/patch-Lib_urlparse.py                      | 127
-rw-r--r--  lang/python27/patches/patch-Modules___ctypes_callproc.c          | 125
14 files changed, 1010 insertions, 24 deletions
diff --git a/lang/python27/Makefile b/lang/python27/Makefile
index 161e8046c4e..097230428ed 100644
--- a/lang/python27/Makefile
+++ b/lang/python27/Makefile
@@ -1,9 +1,9 @@
-# $NetBSD: Makefile,v 1.93 2020/12/07 13:14:38 nia Exp $
+# $NetBSD: Makefile,v 1.94 2021/10/10 03:00:59 gutteridge Exp $
.include "dist.mk"
PKGNAME= python27-${PY_DISTVERSION}
-PKGREVISION= 3
+PKGREVISION= 4
CATEGORIES= lang python
MAINTAINER= pkgsrc-users@NetBSD.org
diff --git a/lang/python27/distinfo b/lang/python27/distinfo
index c0dadd531e8..7793846517d 100644
--- a/lang/python27/distinfo
+++ b/lang/python27/distinfo
@@ -1,14 +1,18 @@
-$NetBSD: distinfo,v 1.84 2021/10/07 14:21:10 nia Exp $
+$NetBSD: distinfo,v 1.85 2021/10/10 03:00:59 gutteridge Exp $
RMD160 (Python-2.7.18.tar.xz) = 40a514bb05c9e631454ea8466e28f5bb229428ad
SHA512 (Python-2.7.18.tar.xz) = a7bb62b51f48ff0b6df0b18f5b0312a523e3110f49c3237936bfe56ed0e26838c0274ff5401bda6fc21bf24337477ccac49e8026c5d651e4b4cafb5eb5086f6c
Size (Python-2.7.18.tar.xz) = 12854736 bytes
+SHA1 (patch-Doc_library_cgi.rst) = ed9ac101b0857dc573e9a648694d1ee5fabe61fb
+SHA1 (patch-Doc_library_urlparse.rst) = f9714b945a2bacb4ec5360c151a42192e00f08ad
SHA1 (patch-Include_pyerrors.h) = 0d2cd52d18cc719b895fa32ed7e11c6cb15bae54
SHA1 (patch-Include_pyport.h) = f3e4ddbc954425a65301465410911222ca471320
SHA1 (patch-Lib___osx__support.py) = 4389472565616b3875c699f6e3e74850d5fde712
+SHA1 (patch-Lib_cgi.py) = 9653904acfd2dbe03655a7cfa5688c450556671b
SHA1 (patch-Lib_ctypes_____init____.py) = 31dd0546bbe29ad1b1d481edc525ba43479c06da
SHA1 (patch-Lib_ctypes_macholib_dyld.py) = 9b7e972d4c71311742ca8b3501382182a4c9e2fe
SHA1 (patch-Lib_ctypes_test_test__macholib.py) = 4479d315cd037f4c9138e8f5baa8eb1685932baa
+SHA1 (patch-Lib_ctypes_test_test__parameters.py) = 8f8bb50515bc7e89ab59363b10af4d5391957eb7
SHA1 (patch-Lib_ctypes_util.py) = 6fa516c7b43f08992427a0afcbe80c17bcc070f1
SHA1 (patch-Lib_distutils_command_build__ext.py) = ea4feba4e93dbcff07050c82a00d591bb650e934
SHA1 (patch-Lib_distutils_command_install.py) = e6aef090b444b455fe351308d251e670329b7dc3
@@ -16,21 +20,25 @@ SHA1 (patch-Lib_distutils_command_install__egg__info.py) = ec7f9e0cd04489b1f6497
SHA1 (patch-Lib_distutils_tests_test__build__ext.py) = 6b3c8c8d1d351836b239c049d34d132953bd4786
SHA1 (patch-Lib_distutils_unixccompiler.py) = db16c9aca2f29730945f28247b88b18828739bbb
SHA1 (patch-Lib_distutils_util.py) = 5bcfad96f8e490351160f1a7c1f4ece7706a33fa
-SHA1 (patch-Lib_httplib.py) = 375d80eb79209f53046c62db128d8d3f64d9e765
+SHA1 (patch-Lib_httplib.py) = b8eeaa203e2a86ece94148d192b2a7e0c078602a
SHA1 (patch-Lib_lib2to3_pgen2_driver.py) = 5d6dab14197f27363394ff1aeee22a8ced8026d2
SHA1 (patch-Lib_multiprocessing_process.py) = 15699bd8ec822bf54a0631102e00e0a34f882803
SHA1 (patch-Lib_plistlib.py) = 96ae702995d434e2d7ec0ac62e37427a90b61d13
SHA1 (patch-Lib_sysconfig.py) = 8a7a0e5cbfec279a05945dffafea1b1131a76f0e
SHA1 (patch-Lib_tarfile.py) = df00aa1941367c42dcbbed4b6658b724a22ddcde
-SHA1 (patch-Lib_test_test__httplib.py) = 9d37263e36110838e0b5f413ff4747deb3966dfe
+SHA1 (patch-Lib_test_multibytecodec__support.py) = a18c40e8009f1a8f63e15196d3e751d7dccf8367
+SHA1 (patch-Lib_test_test__cgi.py) = 724355e8d2195f8a4b76d7ea61133e9b14fa3a68
+SHA1 (patch-Lib_test_test__httplib.py) = f7cfa5501a63eaca539bfa53d38cf931f3a6c3ac
SHA1 (patch-Lib_test_test__platform.py) = 3a3b8c05f9bf9adf4862b1022ce864127d36b8b0
SHA1 (patch-Lib_test_test__unicode.py) = 1bd182bdbd880d0a847f9d8b69277a607f9f0526
SHA1 (patch-Lib_test_test__urllib2.py) = 89baa57daf2f3282e4fc5009915dbc4910b96ef1
-SHA1 (patch-Lib_urllib2.py) = 33a85593da702447fa3ea74b4e3d36d0016f70b5
+SHA1 (patch-Lib_test_test__urlparse.py) = 257cb3bf7a0e9b5e0dcb204f675959b10953ba7b
+SHA1 (patch-Lib_urllib2.py) = 0cc0dc811bb9544496962e08b040b5c96fb9073c
+SHA1 (patch-Lib_urlparse.py) = ec45dd48966eb806a5c0e79af6a7369fb45b9859
SHA1 (patch-Mac_Tools_pythonw.c) = 2b9a60d4b349c240471fd305be69c28e0f654cdc
SHA1 (patch-Makefile.pre.in) = ceaf34237588b527478ce1f9163c9168382fa201
SHA1 (patch-Modules___ctypes_callbacks.c) = 8c335edfc9d2ef47988c5bdf1c3dd8473757637b
-SHA1 (patch-Modules___ctypes_callproc.c) = adac5eb047eb58c14003ea9237d5d34e8b327b2f
+SHA1 (patch-Modules___ctypes_callproc.c) = 7b669f9c081bbc2b7fce2c827703f52b7389d592
SHA1 (patch-Modules___ctypes_ctypes.h) = 07e9d5ecf8309a3ca4bf8382411d56dda08d7b27
SHA1 (patch-Modules___ctypes_malloc__closure.c) = 25d470cc66d218446227c7c1bd7ade409c53b8d0
SHA1 (patch-Modules___multiprocessing_multiprocessing.h) = 7ca8fe22ba4bdcde6d39dd50fe2e86c25994c146
diff --git a/lang/python27/patches/patch-Doc_library_cgi.rst b/lang/python27/patches/patch-Doc_library_cgi.rst
new file mode 100644
index 00000000000..c1b96238bae
--- /dev/null
+++ b/lang/python27/patches/patch-Doc_library_cgi.rst
@@ -0,0 +1,29 @@
+$NetBSD: patch-Doc_library_cgi.rst,v 1.1 2021/10/10 03:00:59 gutteridge Exp $
+
+Fix CVE-2021-23336: Add `separator` argument to parse_qs; warn with default
+Via Fedora:
+https://src.fedoraproject.org/rpms/python2.7/blob/rawhide/f/00359-CVE-2021-23336.patch
+
+--- Doc/library/cgi.rst.orig 2020-04-19 21:13:39.000000000 +0000
++++ Doc/library/cgi.rst
+@@ -285,10 +285,10 @@ These are useful if you want more contro
+ algorithms implemented in this module in other circumstances.
+
+
+-.. function:: parse(fp[, environ[, keep_blank_values[, strict_parsing]]])
++.. function:: parse(fp[, environ[, keep_blank_values[, strict_parsing[, separator]]]])
+
+ Parse a query in the environment or from a file (the file defaults to
+- ``sys.stdin`` and environment defaults to ``os.environ``). The *keep_blank_values* and *strict_parsing* parameters are
++ ``sys.stdin`` and environment defaults to ``os.environ``). The *keep_blank_values*, *strict_parsing* and *separator* parameters are
+ passed to :func:`urlparse.parse_qs` unchanged.
+
+
+@@ -316,7 +316,6 @@ algorithms implemented in this module in
+ Note that this does not parse nested multipart parts --- use
+ :class:`FieldStorage` for that.
+
+-
+ .. function:: parse_header(string)
+
+ Parse a MIME header (such as :mailheader:`Content-Type`) into a main value and a
diff --git a/lang/python27/patches/patch-Doc_library_urlparse.rst b/lang/python27/patches/patch-Doc_library_urlparse.rst
new file mode 100644
index 00000000000..5ac8f5801fa
--- /dev/null
+++ b/lang/python27/patches/patch-Doc_library_urlparse.rst
@@ -0,0 +1,51 @@
+$NetBSD: patch-Doc_library_urlparse.rst,v 1.1 2021/10/10 03:00:59 gutteridge Exp $
+
+Fix CVE-2021-23336: Add `separator` argument to parse_qs; warn with default
+Via Fedora:
+https://src.fedoraproject.org/rpms/python2.7/blob/rawhide/f/00359-CVE-2021-23336.patch
+
+--- Doc/library/urlparse.rst.orig 2020-04-19 21:13:39.000000000 +0000
++++ Doc/library/urlparse.rst
+@@ -136,7 +136,7 @@ The :mod:`urlparse` module defines the f
+ now raise :exc:`ValueError`.
+
+
+-.. function:: parse_qs(qs[, keep_blank_values[, strict_parsing[, max_num_fields]]])
++.. function:: parse_qs(qs[, keep_blank_values[, strict_parsing[, max_num_fields[, separator]]]])
+
+ Parse a query string given as a string argument (data of type
+ :mimetype:`application/x-www-form-urlencoded`). Data are returned as a
+@@ -157,6 +157,15 @@ The :mod:`urlparse` module defines the f
+ read. If set, then throws a :exc:`ValueError` if there are more than
+ *max_num_fields* fields read.
+
++ The optional argument *separator* is the symbol to use for separating the
++ query arguments. It is recommended to set it to ``'&'`` or ``';'``.
++ It defaults to ``'&'``; a warning is raised if this default is used.
++ This default may be changed with the following environment variable settings:
++
++ - ``PYTHON_URLLIB_QS_SEPARATOR='&'``: use only ``&`` as separator, without warning (as in Python 3.6.13+ or 3.10)
++ - ``PYTHON_URLLIB_QS_SEPARATOR=';'``: use only ``;`` as separator
++ - ``PYTHON_URLLIB_QS_SEPARATOR=legacy``: use both ``&`` and ``;`` (as in previous versions of Python)
++
+ Use the :func:`urllib.urlencode` function to convert such dictionaries into
+ query strings.
+
+@@ -186,6 +195,9 @@ The :mod:`urlparse` module defines the f
+ read. If set, then throws a :exc:`ValueError` if there are more than
+ *max_num_fields* fields read.
+
++ The optional argument *separator* is the symbol to use for separating the
++ query arguments. It works as in :py:func:`parse_qs`.
++
+ Use the :func:`urllib.urlencode` function to convert such lists of pairs into
+ query strings.
+
+@@ -195,6 +207,7 @@ The :mod:`urlparse` module defines the f
+ .. versionchanged:: 2.7.16
+ Added *max_num_fields* parameter.
+
++
+ .. function:: urlunparse(parts)
+
+ Construct a URL from a tuple as returned by ``urlparse()``. The *parts* argument
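A quick sketch (not part of the patch set) of the documented behaviour, run against the patched urlparse module; the separator can be passed per call or configured through the environment variable:

# Illustration only: exercises the separator handling added by this patch set.
import os
import urlparse

# Explicit separator argument: only ';' splits fields.
print(sorted(urlparse.parse_qs('a=1;b=2', separator=';').items()))
# [('a', ['1']), ('b', ['2'])]

# PYTHON_URLLIB_QS_SEPARATOR=legacy restores the old "'&' or ';'" splitting.
os.environ['PYTHON_URLLIB_QS_SEPARATOR'] = 'legacy'
print(sorted(urlparse.parse_qs('a=1;b=2&c=3').items()))
# [('a', ['1']), ('b', ['2']), ('c', ['3'])]

With no explicit setting the separator stays '&', and a _QueryStringSeparatorWarning is emitted when a ';' is seen in the query string (see patch-Lib_urlparse.py below).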
diff --git a/lang/python27/patches/patch-Lib_cgi.py b/lang/python27/patches/patch-Lib_cgi.py
new file mode 100644
index 00000000000..30e91c8a0f2
--- /dev/null
+++ b/lang/python27/patches/patch-Lib_cgi.py
@@ -0,0 +1,128 @@
+$NetBSD: patch-Lib_cgi.py,v 1.1 2021/10/10 03:00:59 gutteridge Exp $
+
+Fix CVE-2021-23336: Add `separator` argument to parse_qs; warn with default
+Via Fedora:
+https://src.fedoraproject.org/rpms/python2.7/blob/rawhide/f/00359-CVE-2021-23336.patch
+
+--- Lib/cgi.py.orig 2020-04-19 21:13:39.000000000 +0000
++++ Lib/cgi.py
+@@ -121,7 +121,8 @@ log = initlog # The current lo
+ # 0 ==> unlimited input
+ maxlen = 0
+
+-def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
++def parse(fp=None, environ=os.environ, keep_blank_values=0,
++ strict_parsing=0, separator=None):
+ """Parse a query in the environment or from a file (default stdin)
+
+ Arguments, all optional:
+@@ -140,6 +141,8 @@ def parse(fp=None, environ=os.environ, k
+ strict_parsing: flag indicating what to do with parsing errors.
+ If false (the default), errors are silently ignored.
+ If true, errors raise a ValueError exception.
++
++ separator: str. The symbol to use for separating the query arguments.
+ """
+ if fp is None:
+ fp = sys.stdin
+@@ -171,25 +174,26 @@ def parse(fp=None, environ=os.environ, k
+ else:
+ qs = ""
+ environ['QUERY_STRING'] = qs # XXX Shouldn't, really
+- return urlparse.parse_qs(qs, keep_blank_values, strict_parsing)
++ return urlparse.parse_qs(qs, keep_blank_values, strict_parsing, separator=separator)
+
+
+ # parse query string function called from urlparse,
+ # this is done in order to maintain backward compatibility.
+
+-def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
++def parse_qs(qs, keep_blank_values=0, strict_parsing=0, separator=None):
+ """Parse a query given as a string argument."""
+ warn("cgi.parse_qs is deprecated, use urlparse.parse_qs instead",
+ PendingDeprecationWarning, 2)
+- return urlparse.parse_qs(qs, keep_blank_values, strict_parsing)
++ return urlparse.parse_qs(qs, keep_blank_values, strict_parsing,
++ separator=separator)
+
+
+-def parse_qsl(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None):
++def parse_qsl(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None, separator=None):
+ """Parse a query given as a string argument."""
+ warn("cgi.parse_qsl is deprecated, use urlparse.parse_qsl instead",
+ PendingDeprecationWarning, 2)
+ return urlparse.parse_qsl(qs, keep_blank_values, strict_parsing,
+- max_num_fields)
++ max_num_fields, separator=separator)
+
+ def parse_multipart(fp, pdict):
+ """Parse multipart input.
+@@ -288,7 +292,6 @@ def parse_multipart(fp, pdict):
+
+ return partdict
+
+-
+ def _parseparam(s):
+ while s[:1] == ';':
+ s = s[1:]
+@@ -395,7 +398,7 @@ class FieldStorage:
+
+ def __init__(self, fp=None, headers=None, outerboundary="",
+ environ=os.environ, keep_blank_values=0, strict_parsing=0,
+- max_num_fields=None):
++ max_num_fields=None, separator=None):
+ """Constructor. Read multipart/* until last part.
+
+ Arguments, all optional:
+@@ -430,6 +433,7 @@ class FieldStorage:
+ self.keep_blank_values = keep_blank_values
+ self.strict_parsing = strict_parsing
+ self.max_num_fields = max_num_fields
++ self.separator = separator
+ if 'REQUEST_METHOD' in environ:
+ method = environ['REQUEST_METHOD'].upper()
+ self.qs_on_post = None
+@@ -613,7 +617,8 @@ class FieldStorage:
+ if self.qs_on_post:
+ qs += '&' + self.qs_on_post
+ query = urlparse.parse_qsl(qs, self.keep_blank_values,
+- self.strict_parsing, self.max_num_fields)
++ self.strict_parsing, self.max_num_fields,
++ self.separator)
+ self.list = [MiniFieldStorage(key, value) for key, value in query]
+ self.skip_lines()
+
+@@ -629,7 +634,8 @@ class FieldStorage:
+ query = urlparse.parse_qsl(self.qs_on_post,
+ self.keep_blank_values,
+ self.strict_parsing,
+- self.max_num_fields)
++ self.max_num_fields,
++ self.separator)
+ self.list.extend(MiniFieldStorage(key, value)
+ for key, value in query)
+ FieldStorageClass = None
+@@ -649,7 +655,8 @@ class FieldStorage:
+ headers = rfc822.Message(self.fp)
+ part = klass(self.fp, headers, ib,
+ environ, keep_blank_values, strict_parsing,
+- max_num_fields)
++ max_num_fields,
++ separator=self.separator)
+
+ if max_num_fields is not None:
+ max_num_fields -= 1
+@@ -817,10 +824,11 @@ class FormContentDict(UserDict.UserDict)
+ form.dict == {key: [val, val, ...], ...}
+
+ """
+- def __init__(self, environ=os.environ, keep_blank_values=0, strict_parsing=0):
++ def __init__(self, environ=os.environ, keep_blank_values=0, strict_parsing=0, separator=None):
+ self.dict = self.data = parse(environ=environ,
+ keep_blank_values=keep_blank_values,
+- strict_parsing=strict_parsing)
++ strict_parsing=strict_parsing,
++ separator=separator)
+ self.query_string = environ['QUERY_STRING']
+
+
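A minimal sketch (not part of the patch) showing how the new keyword is threaded from the cgi classes down to urlparse.parse_qsl:

# Illustration only: FieldStorage with the separator keyword added above.
import cgi

env = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'x=1;y=2.0'}
fs = cgi.FieldStorage(environ=env, separator=';')
print(fs.getvalue('x'))   # 1
print(fs.getvalue('y'))   # 2.0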
diff --git a/lang/python27/patches/patch-Lib_ctypes_test_test__parameters.py b/lang/python27/patches/patch-Lib_ctypes_test_test__parameters.py
new file mode 100644
index 00000000000..ac0e62f7fd0
--- /dev/null
+++ b/lang/python27/patches/patch-Lib_ctypes_test_test__parameters.py
@@ -0,0 +1,58 @@
+$NetBSD: patch-Lib_ctypes_test_test__parameters.py,v 1.1 2021/10/10 03:00:59 gutteridge Exp $
+
+Fix CVE-2021-3177: Replace snprintf with Python unicode formatting in ctypes param reprs
+Via Fedora:
+https://src.fedoraproject.org/rpms/python2.7/blob/rawhide/f/00357-CVE-2021-3177.patch
+
+--- Lib/ctypes/test/test_parameters.py.orig 2020-04-19 21:13:39.000000000 +0000
++++ Lib/ctypes/test/test_parameters.py
+@@ -206,6 +206,49 @@ class SimpleTypesTestCase(unittest.TestC
+ with self.assertRaises(ZeroDivisionError):
+ WorseStruct().__setstate__({}, b'foo')
+
++ def test_parameter_repr(self):
++ from ctypes import (
++ c_bool,
++ c_char,
++ c_wchar,
++ c_byte,
++ c_ubyte,
++ c_short,
++ c_ushort,
++ c_int,
++ c_uint,
++ c_long,
++ c_ulong,
++ c_longlong,
++ c_ulonglong,
++ c_float,
++ c_double,
++ c_longdouble,
++ c_char_p,
++ c_wchar_p,
++ c_void_p,
++ )
++ self.assertRegexpMatches(repr(c_bool.from_param(True)), r"^<cparam '\?' at 0x[A-Fa-f0-9]+>$")
++ self.assertEqual(repr(c_char.from_param('a')), "<cparam 'c' ('a')>")
++ self.assertRegexpMatches(repr(c_wchar.from_param('a')), r"^<cparam 'u' at 0x[A-Fa-f0-9]+>$")
++ self.assertEqual(repr(c_byte.from_param(98)), "<cparam 'b' (98)>")
++ self.assertEqual(repr(c_ubyte.from_param(98)), "<cparam 'B' (98)>")
++ self.assertEqual(repr(c_short.from_param(511)), "<cparam 'h' (511)>")
++ self.assertEqual(repr(c_ushort.from_param(511)), "<cparam 'H' (511)>")
++ self.assertRegexpMatches(repr(c_int.from_param(20000)), r"^<cparam '[li]' \(20000\)>$")
++ self.assertRegexpMatches(repr(c_uint.from_param(20000)), r"^<cparam '[LI]' \(20000\)>$")
++ self.assertRegexpMatches(repr(c_long.from_param(20000)), r"^<cparam '[li]' \(20000\)>$")
++ self.assertRegexpMatches(repr(c_ulong.from_param(20000)), r"^<cparam '[LI]' \(20000\)>$")
++ self.assertRegexpMatches(repr(c_longlong.from_param(20000)), r"^<cparam '[liq]' \(20000\)>$")
++ self.assertRegexpMatches(repr(c_ulonglong.from_param(20000)), r"^<cparam '[LIQ]' \(20000\)>$")
++ self.assertEqual(repr(c_float.from_param(1.5)), "<cparam 'f' (1.5)>")
++ self.assertEqual(repr(c_double.from_param(1.5)), "<cparam 'd' (1.5)>")
++ self.assertEqual(repr(c_double.from_param(1e300)), "<cparam 'd' (1e+300)>")
++ self.assertRegexpMatches(repr(c_longdouble.from_param(1.5)), r"^<cparam ('d' \(1.5\)|'g' at 0x[A-Fa-f0-9]+)>$")
++ self.assertRegexpMatches(repr(c_char_p.from_param(b'hihi')), "^<cparam 'z' \(0x[A-Fa-f0-9]+\)>$")
++ self.assertRegexpMatches(repr(c_wchar_p.from_param('hihi')), "^<cparam 'Z' \(0x[A-Fa-f0-9]+\)>$")
++ self.assertRegexpMatches(repr(c_void_p.from_param(0x12)), r"^<cparam 'P' \(0x0*12\)>$")
++
+ ################################################################
+
+ if __name__ == '__main__':
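For reference, a tiny interactive check (not part of the patch) of the reprs the new test expects, now built with Python string formatting instead of a fixed 256-byte sprintf buffer:

# Illustration only: matches the assertions in the test above.
from ctypes import c_char, c_double

print(repr(c_char.from_param('a')))      # <cparam 'c' ('a')>
print(repr(c_double.from_param(1e300)))  # <cparam 'd' (1e+300)>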
diff --git a/lang/python27/patches/patch-Lib_httplib.py b/lang/python27/patches/patch-Lib_httplib.py
index 595b3515cfd..93d33b711a0 100644
--- a/lang/python27/patches/patch-Lib_httplib.py
+++ b/lang/python27/patches/patch-Lib_httplib.py
@@ -1,10 +1,14 @@
-$NetBSD: patch-Lib_httplib.py,v 1.3 2020/09/27 14:57:22 leot Exp $
+$NetBSD: patch-Lib_httplib.py,v 1.4 2021/10/10 03:00:59 gutteridge Exp $
bpo-39603 (CVE-2020-26116): header injection via HTTP method
-taken from:
+Taken from:
https://gitweb.gentoo.org/fork/cpython.git/commit/?h=gentoo-2.7-vanilla&id=138e2caeb4827ccfd1eaff2cf63afb79dfeeb3c4
+Fix CVE-2021-3737: http client infinite line reading (DoS) after a HTTP 100 Continue
+Via Fedora:
+https://src.fedoraproject.org/rpms/python2.7/blob/rawhide/f/00368-CVE-2021-3737.patch
+
--- Lib/httplib.py.orig 2020-04-19 21:13:39.000000000 +0000
+++ Lib/httplib.py
@@ -257,6 +257,10 @@ _contains_disallowed_url_pchar_re = re.c
@@ -18,7 +22,53 @@ https://gitweb.gentoo.org/fork/cpython.git/commit/?h=gentoo-2.7-vanilla&id=138e2
# We always set the Content-Length header for these methods because some
# servers will otherwise respond with a 411
_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
-@@ -935,6 +939,8 @@ class HTTPConnection:
+@@ -361,6 +365,25 @@ class HTTPMessage(mimetools.Message):
+ # It's not a header line; skip it and try the next line.
+ self.status = 'Non-header line where header expected'
+
++
++def _read_headers(fp):
++ """Reads potential header lines into a list from a file pointer.
++ Length of line is limited by _MAXLINE, and number of
++ headers is limited by _MAXHEADERS.
++ """
++ headers = []
++ while True:
++ line = fp.readline(_MAXLINE + 1)
++ if len(line) > _MAXLINE:
++ raise LineTooLong("header line")
++ headers.append(line)
++ if len(headers) > _MAXHEADERS:
++ raise HTTPException("got more than %d headers" % _MAXHEADERS)
++ if line in (b'\r\n', b'\n', b''):
++ break
++ return headers
++
++
+ class HTTPResponse:
+
+ # strict: If true, raise BadStatusLine if the status line can't be
+@@ -449,15 +472,10 @@ class HTTPResponse:
+ if status != CONTINUE:
+ break
+ # skip the header from the 100 response
+- while True:
+- skip = self.fp.readline(_MAXLINE + 1)
+- if len(skip) > _MAXLINE:
+- raise LineTooLong("header line")
+- skip = skip.strip()
+- if not skip:
+- break
+- if self.debuglevel > 0:
+- print "header:", skip
++ skipped_headers = _read_headers(self.fp)
++ if self.debuglevel > 0:
++ print("headers:", skipped_headers)
++ del skipped_headers
+
+ self.status = status
+ self.reason = reason.strip()
+@@ -935,6 +953,8 @@ class HTTPConnection:
else:
raise CannotSendRequest()
@@ -27,7 +77,7 @@ https://gitweb.gentoo.org/fork/cpython.git/commit/?h=gentoo-2.7-vanilla&id=138e2
# Save the method for use later in the response phase
self._method = method
-@@ -1020,6 +1026,17 @@ class HTTPConnection:
+@@ -1020,6 +1040,17 @@ class HTTPConnection:
# On Python 2, request is already encoded (default)
return request
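A sketch (not part of the patch) of the effect of the new _read_headers() helper: the headers of a "100 Continue" response are now subject to the same _MAXLINE and _MAXHEADERS limits as regular headers. _FakeSocket below is a hypothetical stand-in for the test suite's FakeSocket; the real regression test is in patch-Lib_test_test__httplib.py further down.

# Illustration only; _FakeSocket is a minimal stand-in, not library API.
import httplib
from StringIO import StringIO

class _FakeSocket(object):
    def __init__(self, text):
        self._file = StringIO(text)
    def makefile(self, mode, bufsize=None):
        return self._file

# An endless stream of header lines after "100 Continue" now raises
# HTTPException ("got more than 100 headers") instead of looping forever.
body = 'HTTP/1.1 100 Continue\r\n' + 'X-Junk: y\r\n' * 200
resp = httplib.HTTPResponse(_FakeSocket(body))
try:
    resp.begin()
except httplib.HTTPException as exc:
    print(exc)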
diff --git a/lang/python27/patches/patch-Lib_test_multibytecodec__support.py b/lang/python27/patches/patch-Lib_test_multibytecodec__support.py
new file mode 100644
index 00000000000..479d2983e8f
--- /dev/null
+++ b/lang/python27/patches/patch-Lib_test_multibytecodec__support.py
@@ -0,0 +1,46 @@
+$NetBSD: patch-Lib_test_multibytecodec__support.py,v 1.1 2021/10/10 03:00:59 gutteridge Exp $
+
+Fix CVE-2020-27619: No longer call eval() on content received via HTTP in the CJK codec tests
+Via Fedora:
+https://src.fedoraproject.org/rpms/python2.7/blob/rawhide/f/00355-CVE-2020-27619.patch
+
+--- Lib/test/multibytecodec_support.py.orig 2020-04-19 21:13:39.000000000 +0000
++++ Lib/test/multibytecodec_support.py
+@@ -279,30 +279,22 @@ class TestBase_Mapping(unittest.TestCase
+ self._test_mapping_file_plain()
+
+ def _test_mapping_file_plain(self):
+- _unichr = lambda c: eval("u'\\U%08x'" % int(c, 16))
+- unichrs = lambda s: u''.join(_unichr(c) for c in s.split('+'))
++ def unichrs(s):
++ return ''.join(unichr(int(x, 16)) for x in s.split('+'))
+ urt_wa = {}
+
+ with self.open_mapping_file() as f:
+ for line in f:
+ if not line:
+ break
+- data = line.split('#')[0].strip().split()
++ data = line.split('#')[0].split()
+ if len(data) != 2:
+ continue
+
+- csetval = eval(data[0])
+- if csetval <= 0x7F:
+- csetch = chr(csetval & 0xff)
+- elif csetval >= 0x1000000:
+- csetch = chr(csetval >> 24) + chr((csetval >> 16) & 0xff) + \
+- chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
+- elif csetval >= 0x10000:
+- csetch = chr(csetval >> 16) + \
+- chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
+- elif csetval >= 0x100:
+- csetch = chr(csetval >> 8) + chr(csetval & 0xff)
+- else:
++ if data[0][:2] != '0x':
++ self.fail("Invalid line: {!r}".format(line))
++ csetch = bytes.fromhex(data[0][2:])
++ if len(csetch) == 1 and 0x80 <= csetch[0]:
+ continue
+
+ unich = unichrs(data[1])
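The replacement unichrs() helper simply decodes '+'-separated hexadecimal code points, so no downloaded mapping data is ever passed to eval(). A standalone sketch of that helper:

# Illustration only: the eval-free helper as introduced in the patch above.
def unichrs(s):
    return ''.join(unichr(int(x, 16)) for x in s.split('+'))

print(repr(unichrs('0x4E00+0x4E8C')))   # u'\u4e00\u4e8c'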
diff --git a/lang/python27/patches/patch-Lib_test_test__cgi.py b/lang/python27/patches/patch-Lib_test_test__cgi.py
new file mode 100644
index 00000000000..4cad2a242bc
--- /dev/null
+++ b/lang/python27/patches/patch-Lib_test_test__cgi.py
@@ -0,0 +1,91 @@
+$NetBSD: patch-Lib_test_test__cgi.py,v 1.1 2021/10/10 03:00:59 gutteridge Exp $
+
+Fix CVE-2021-23336: Add `separator` argument to parse_qs; warn with default
+Via Fedora:
+https://src.fedoraproject.org/rpms/python2.7/blob/rawhide/f/00359-CVE-2021-23336.patch
+
+--- Lib/test/test_cgi.py.orig 2020-04-19 21:13:39.000000000 +0000
++++ Lib/test/test_cgi.py
+@@ -61,12 +61,9 @@ parse_strict_test_cases = [
+ ("", ValueError("bad query field: ''")),
+ ("&", ValueError("bad query field: ''")),
+ ("&&", ValueError("bad query field: ''")),
+- (";", ValueError("bad query field: ''")),
+- (";&;", ValueError("bad query field: ''")),
+ # Should the next few really be valid?
+ ("=", {}),
+ ("=&=", {}),
+- ("=;=", {}),
+ # This rest seem to make sense
+ ("=a", {'': ['a']}),
+ ("&=a", ValueError("bad query field: ''")),
+@@ -81,8 +78,6 @@ parse_strict_test_cases = [
+ ("a=a+b&b=b+c", {'a': ['a b'], 'b': ['b c']}),
+ ("a=a+b&a=b+a", {'a': ['a b', 'b a']}),
+ ("x=1&y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
+- ("x=1;y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
+- ("x=1;y=2.0;z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
+ ("Hbc5161168c542333633315dee1182227:key_store_seqid=400006&cuyer=r&view=bustomer&order_id=0bb2e248638833d48cb7fed300000f1b&expire=964546263&lobale=en-US&kid=130003.300038&ss=env",
+ {'Hbc5161168c542333633315dee1182227:key_store_seqid': ['400006'],
+ 'cuyer': ['r'],
+@@ -143,6 +138,60 @@ class CgiTests(unittest.TestCase):
+ if isinstance(expect, dict):
+ # test dict interface
+ self.assertEqual(len(expect), len(fcd))
++ self.assertItemsEqual(expect.keys(), fcd.keys())
++ self.assertItemsEqual(expect.values(), fcd.values())
++ self.assertItemsEqual(expect.items(), fcd.items())
++ self.assertEqual(fcd.get("nonexistent field", "default"), "default")
++ self.assertEqual(len(sd), len(fs))
++ self.assertItemsEqual(sd.keys(), fs.keys())
++ self.assertEqual(fs.getvalue("nonexistent field", "default"), "default")
++ # test individual fields
++ for key in expect.keys():
++ expect_val = expect[key]
++ self.assertTrue(fcd.has_key(key))
++ self.assertItemsEqual(fcd[key], expect[key])
++ self.assertEqual(fcd.get(key, "default"), fcd[key])
++ self.assertTrue(fs.has_key(key))
++ if len(expect_val) > 1:
++ single_value = 0
++ else:
++ single_value = 1
++ try:
++ val = sd[key]
++ except IndexError:
++ self.assertFalse(single_value)
++ self.assertEqual(fs.getvalue(key), expect_val)
++ else:
++ self.assertTrue(single_value)
++ self.assertEqual(val, expect_val[0])
++ self.assertEqual(fs.getvalue(key), expect_val[0])
++ self.assertItemsEqual(sd.getlist(key), expect_val)
++ if single_value:
++ self.assertItemsEqual(sd.values(),
++ first_elts(expect.values()))
++ self.assertItemsEqual(sd.items(),
++ first_second_elts(expect.items()))
++
++ def test_separator(self):
++ parse_semicolon = [
++ ("x=1;y=2.0", {'x': ['1'], 'y': ['2.0']}),
++ ("x=1;y=2.0;z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
++ (";", ValueError("bad query field: ''")),
++ (";;", ValueError("bad query field: ''")),
++ ("=;a", ValueError("bad query field: 'a'")),
++ (";b=a", ValueError("bad query field: ''")),
++ ("b;=a", ValueError("bad query field: 'b'")),
++ ("a=a+b;b=b+c", {'a': ['a b'], 'b': ['b c']}),
++ ("a=a+b;a=b+a", {'a': ['a b', 'b a']}),
++ ]
++ for orig, expect in parse_semicolon:
++ env = {'QUERY_STRING': orig}
++ fcd = cgi.FormContentDict(env, separator=';')
++ sd = cgi.SvFormContentDict(env, separator=';')
++ fs = cgi.FieldStorage(environ=env, separator=';')
++ if isinstance(expect, dict):
++ # test dict interface
++ self.assertEqual(len(expect), len(fcd))
+ self.assertItemsEqual(expect.keys(), fcd.keys())
+ self.assertItemsEqual(expect.values(), fcd.values())
+ self.assertItemsEqual(expect.items(), fcd.items())
diff --git a/lang/python27/patches/patch-Lib_test_test__httplib.py b/lang/python27/patches/patch-Lib_test_test__httplib.py
index 4d2af5e442a..2b8b2507b0f 100644
--- a/lang/python27/patches/patch-Lib_test_test__httplib.py
+++ b/lang/python27/patches/patch-Lib_test_test__httplib.py
@@ -1,10 +1,14 @@
-$NetBSD: patch-Lib_test_test__httplib.py,v 1.3 2020/09/27 14:57:22 leot Exp $
+$NetBSD: patch-Lib_test_test__httplib.py,v 1.4 2021/10/10 03:00:59 gutteridge Exp $
bpo-39603 (CVE-2020-26116): header injection via HTTP method
-taken from:
+Taken from:
https://gitweb.gentoo.org/fork/cpython.git/commit/?h=gentoo-2.7-vanilla&id=138e2caeb4827ccfd1eaff2cf63afb79dfeeb3c4
+Fix CVE-2021-3737: http client infinite line reading (DoS) after a HTTP 100 Continue
+Via Fedora:
+https://src.fedoraproject.org/rpms/python2.7/blob/rawhide/f/00368-CVE-2021-3737.patch
+
--- Lib/test/test_httplib.py.orig 2020-04-19 21:13:39.000000000 +0000
+++ Lib/test/test_httplib.py
@@ -384,6 +384,26 @@ class HeaderTests(TestCase):
@@ -34,3 +38,18 @@ https://gitweb.gentoo.org/fork/cpython.git/commit/?h=gentoo-2.7-vanilla&id=138e2
class BasicTest(TestCase):
def test_status_lines(self):
+@@ -655,6 +675,14 @@ class BasicTest(TestCase):
+ resp = httplib.HTTPResponse(FakeSocket(body))
+ self.assertRaises(httplib.LineTooLong, resp.begin)
+
++ def test_overflowing_header_limit_after_100(self):
++ body = (
++ 'HTTP/1.1 100 OK\r\n'
++ 'r\n' * 32768
++ )
++ resp = httplib.HTTPResponse(FakeSocket(body))
++ self.assertRaises(httplib.HTTPException, resp.begin)
++
+ def test_overflowing_chunked_line(self):
+ body = (
+ 'HTTP/1.1 200 OK\r\n'
diff --git a/lang/python27/patches/patch-Lib_test_test__urlparse.py b/lang/python27/patches/patch-Lib_test_test__urlparse.py
new file mode 100644
index 00000000000..ebf1b8d3799
--- /dev/null
+++ b/lang/python27/patches/patch-Lib_test_test__urlparse.py
@@ -0,0 +1,265 @@
+$NetBSD: patch-Lib_test_test__urlparse.py,v 1.1 2021/10/10 03:00:59 gutteridge Exp $
+
+Fix CVE-2021-23336: Add `separator` argument to parse_qs; warn with default
+Via Fedora:
+https://src.fedoraproject.org/rpms/python2.7/blob/rawhide/f/00359-CVE-2021-23336.patch
+
+--- Lib/test/test_urlparse.py.orig 2020-04-19 21:13:39.000000000 +0000
++++ Lib/test/test_urlparse.py
+@@ -3,6 +3,12 @@ import sys
+ import unicodedata
+ import unittest
+ import urlparse
++from test.support import EnvironmentVarGuard
++from warnings import catch_warnings, filterwarnings
++import tempfile
++import contextlib
++import os.path
++import shutil
+
+ RFC1808_BASE = "http://a/b/c/d;p?q#f"
+ RFC2396_BASE = "http://a/b/c/d;p?q"
+@@ -24,16 +30,29 @@ parse_qsl_test_cases = [
+ ("&a=b", [('a', 'b')]),
+ ("a=a+b&b=b+c", [('a', 'a b'), ('b', 'b c')]),
+ ("a=1&a=2", [('a', '1'), ('a', '2')]),
++]
++
++parse_qsl_test_cases_semicolon = [
+ (";", []),
+ (";;", []),
+ (";a=b", [('a', 'b')]),
+ ("a=a+b;b=b+c", [('a', 'a b'), ('b', 'b c')]),
+ ("a=1;a=2", [('a', '1'), ('a', '2')]),
+- (b";", []),
+- (b";;", []),
+- (b";a=b", [(b'a', b'b')]),
+- (b"a=a+b;b=b+c", [(b'a', b'a b'), (b'b', b'b c')]),
+- (b"a=1;a=2", [(b'a', b'1'), (b'a', b'2')]),
++]
++
++parse_qsl_test_cases_legacy = [
++ ("a=1;a=2&a=3", [('a', '1'), ('a', '2'), ('a', '3')]),
++ ("a=1;b=2&c=3", [('a', '1'), ('b', '2'), ('c', '3')]),
++ ("a=1&b=2&c=3;", [('a', '1'), ('b', '2'), ('c', '3')]),
++]
++
++parse_qsl_test_cases_warn = [
++ (";a=b", [(';a', 'b')]),
++ ("a=a+b;b=b+c", [('a', 'a b;b=b c')]),
++ (b";a=b", [(b';a', b'b')]),
++ (b"a=a+b;b=b+c", [(b'a', b'a b;b=b c')]),
++ ("a=1;a=2&a=3", [('a', '1;a=2'), ('a', '3')]),
++ (b"a=1;a=2&a=3", [(b'a', b'1;a=2'), (b'a', b'3')]),
+ ]
+
+ parse_qs_test_cases = [
+@@ -57,6 +76,9 @@ parse_qs_test_cases = [
+ (b"&a=b", {b'a': [b'b']}),
+ (b"a=a+b&b=b+c", {b'a': [b'a b'], b'b': [b'b c']}),
+ (b"a=1&a=2", {b'a': [b'1', b'2']}),
++]
++
++parse_qs_test_cases_semicolon = [
+ (";", {}),
+ (";;", {}),
+ (";a=b", {'a': ['b']}),
+@@ -69,6 +91,24 @@ parse_qs_test_cases = [
+ (b"a=1;a=2", {b'a': [b'1', b'2']}),
+ ]
+
++parse_qs_test_cases_legacy = [
++ ("a=1;a=2&a=3", {'a': ['1', '2', '3']}),
++ ("a=1;b=2&c=3", {'a': ['1'], 'b': ['2'], 'c': ['3']}),
++ ("a=1&b=2&c=3;", {'a': ['1'], 'b': ['2'], 'c': ['3']}),
++ (b"a=1;a=2&a=3", {b'a': [b'1', b'2', b'3']}),
++ (b"a=1;b=2&c=3", {b'a': [b'1'], b'b': [b'2'], b'c': [b'3']}),
++ (b"a=1&b=2&c=3;", {b'a': [b'1'], b'b': [b'2'], b'c': [b'3']}),
++]
++
++parse_qs_test_cases_warn = [
++ (";a=b", {';a': ['b']}),
++ ("a=a+b;b=b+c", {'a': ['a b;b=b c']}),
++ (b";a=b", {b';a': [b'b']}),
++ (b"a=a+b;b=b+c", {b'a':[ b'a b;b=b c']}),
++ ("a=1;a=2&a=3", {'a': ['1;a=2', '3']}),
++ (b"a=1;a=2&a=3", {b'a': [b'1;a=2', b'3']}),
++]
++
+ class UrlParseTestCase(unittest.TestCase):
+
+ def checkRoundtrips(self, url, parsed, split):
+@@ -141,6 +181,40 @@ class UrlParseTestCase(unittest.TestCase
+ self.assertEqual(result, expect_without_blanks,
+ "Error parsing %r" % orig)
+
++ def test_qs_default_warn(self):
++ for orig, expect in parse_qs_test_cases_warn:
++ with catch_warnings(record=True) as w:
++ filterwarnings(action='always',
++ category=urlparse._QueryStringSeparatorWarning)
++ result = urlparse.parse_qs(orig, keep_blank_values=True)
++ self.assertEqual(result, expect, "Error parsing %r" % orig)
++ self.assertEqual(len(w), 1)
++ self.assertEqual(w[0].category, urlparse._QueryStringSeparatorWarning)
++
++ def test_qsl_default_warn(self):
++ for orig, expect in parse_qsl_test_cases_warn:
++ with catch_warnings(record=True) as w:
++ filterwarnings(action='always',
++ category=urlparse._QueryStringSeparatorWarning)
++ result = urlparse.parse_qsl(orig, keep_blank_values=True)
++ self.assertEqual(result, expect, "Error parsing %r" % orig)
++ self.assertEqual(len(w), 1)
++ self.assertEqual(w[0].category, urlparse._QueryStringSeparatorWarning)
++
++ def test_default_qs_no_warnings(self):
++ for orig, expect in parse_qs_test_cases:
++ with catch_warnings(record=True) as w:
++ result = urlparse.parse_qs(orig, keep_blank_values=True)
++ self.assertEqual(result, expect, "Error parsing %r" % orig)
++ self.assertEqual(len(w), 0)
++
++ def test_default_qsl_no_warnings(self):
++ for orig, expect in parse_qsl_test_cases:
++ with catch_warnings(record=True) as w:
++ result = urlparse.parse_qsl(orig, keep_blank_values=True)
++ self.assertEqual(result, expect, "Error parsing %r" % orig)
++ self.assertEqual(len(w), 0)
++
+ def test_roundtrips(self):
+ testcases = [
+ ('file:///tmp/junk.txt',
+@@ -626,6 +700,132 @@ class UrlParseTestCase(unittest.TestCase
+ self.assertEqual(urlparse.urlparse("http://www.python.org:80"),
+ ('http','www.python.org:80','','','',''))
+
++ def test_parse_qs_separator_bytes(self):
++ expected = {b'a': [b'1'], b'b': [b'2']}
++
++ result = urlparse.parse_qs(b'a=1;b=2', separator=b';')
++ self.assertEqual(result, expected)
++ result = urlparse.parse_qs(b'a=1;b=2', separator=';')
++ self.assertEqual(result, expected)
++ result = urlparse.parse_qs('a=1;b=2', separator=';')
++ self.assertEqual(result, {'a': ['1'], 'b': ['2']})
++
++ @contextlib.contextmanager
++ def _qsl_sep_config(self, sep):
++ """Context for the given parse_qsl default separator configured in config file"""
++ old_filename = urlparse._QS_SEPARATOR_CONFIG_FILENAME
++ urlparse._default_qs_separator = None
++ try:
++ tmpdirname = tempfile.mkdtemp()
++ filename = os.path.join(tmpdirname, 'conf.cfg')
++ with open(filename, 'w') as file:
++ file.write('[parse_qs]\n')
++ file.write('PYTHON_URLLIB_QS_SEPARATOR = {}'.format(sep))
++ urlparse._QS_SEPARATOR_CONFIG_FILENAME = filename
++ yield
++ finally:
++ urlparse._QS_SEPARATOR_CONFIG_FILENAME = old_filename
++ urlparse._default_qs_separator = None
++ shutil.rmtree(tmpdirname)
++
++ def test_parse_qs_separator_semicolon(self):
++ for orig, expect in parse_qs_test_cases_semicolon:
++ result = urlparse.parse_qs(orig, separator=';')
++ self.assertEqual(result, expect, "Error parsing %r" % orig)
++ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
++ environ['PYTHON_URLLIB_QS_SEPARATOR'] = ';'
++ result = urlparse.parse_qs(orig)
++ self.assertEqual(result, expect, "Error parsing %r" % orig)
++ self.assertEqual(len(w), 0)
++ with self._qsl_sep_config(';'), catch_warnings(record=True) as w:
++ result = urlparse.parse_qs(orig)
++ self.assertEqual(result, expect, "Error parsing %r" % orig)
++ self.assertEqual(len(w), 0)
++
++ def test_parse_qsl_separator_semicolon(self):
++ for orig, expect in parse_qsl_test_cases_semicolon:
++ result = urlparse.parse_qsl(orig, separator=';')
++ self.assertEqual(result, expect, "Error parsing %r" % orig)
++ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
++ environ['PYTHON_URLLIB_QS_SEPARATOR'] = ';'
++ result = urlparse.parse_qsl(orig)
++ self.assertEqual(result, expect, "Error parsing %r" % orig)
++ self.assertEqual(len(w), 0)
++ with self._qsl_sep_config(';'), catch_warnings(record=True) as w:
++ result = urlparse.parse_qsl(orig)
++ self.assertEqual(result, expect, "Error parsing %r" % orig)
++ self.assertEqual(len(w), 0)
++
++ def test_parse_qs_separator_legacy(self):
++ for orig, expect in parse_qs_test_cases_legacy:
++ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
++ environ['PYTHON_URLLIB_QS_SEPARATOR'] = 'legacy'
++ result = urlparse.parse_qs(orig)
++ self.assertEqual(result, expect, "Error parsing %r" % orig)
++ self.assertEqual(len(w), 0)
++ with self._qsl_sep_config('legacy'), catch_warnings(record=True) as w:
++ result = urlparse.parse_qs(orig)
++ self.assertEqual(result, expect, "Error parsing %r" % orig)
++ self.assertEqual(len(w), 0)
++
++ def test_parse_qsl_separator_legacy(self):
++ for orig, expect in parse_qsl_test_cases_legacy:
++ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
++ environ['PYTHON_URLLIB_QS_SEPARATOR'] = 'legacy'
++ result = urlparse.parse_qsl(orig)
++ self.assertEqual(result, expect, "Error parsing %r" % orig)
++ self.assertEqual(len(w), 0)
++ with self._qsl_sep_config('legacy'), catch_warnings(record=True) as w:
++ result = urlparse.parse_qsl(orig)
++ self.assertEqual(result, expect, "Error parsing %r" % orig)
++ self.assertEqual(len(w), 0)
++
++ def test_parse_qs_separator_bad_value_env_or_config(self):
++ for bad_sep in '', 'abc', 'safe', '&;', 'SEP':
++ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
++ environ['PYTHON_URLLIB_QS_SEPARATOR'] = bad_sep
++ with self.assertRaises(ValueError):
++ urlparse.parse_qsl('a=1;b=2')
++ with self._qsl_sep_config('bad_sep'), catch_warnings(record=True) as w:
++ with self.assertRaises(ValueError):
++ urlparse.parse_qsl('a=1;b=2')
++
++ def test_parse_qs_separator_bad_value_arg(self):
++ for bad_sep in True, {}, '':
++ with self.assertRaises(ValueError):
++ urlparse.parse_qsl('a=1;b=2', separator=bad_sep)
++
++ def test_parse_qs_separator_num_fields(self):
++ for qs, sep in (
++ ('a&b&c', '&'),
++ ('a;b;c', ';'),
++ ('a&b;c', 'legacy'),
++ ):
++ with EnvironmentVarGuard() as environ, catch_warnings(record=True) as w:
++ if sep != 'legacy':
++ with self.assertRaises(ValueError):
++ urlparse.parse_qsl(qs, separator=sep, max_num_fields=2)
++ if sep:
++ environ['PYTHON_URLLIB_QS_SEPARATOR'] = sep
++ with self.assertRaises(ValueError):
++ urlparse.parse_qsl(qs, max_num_fields=2)
++
++ def test_parse_qs_separator_priority(self):
++ # env variable trumps config file
++ with self._qsl_sep_config('~'), EnvironmentVarGuard() as environ:
++ environ['PYTHON_URLLIB_QS_SEPARATOR'] = '!'
++ result = urlparse.parse_qs('a=1!b=2~c=3')
++ self.assertEqual(result, {'a': ['1'], 'b': ['2~c=3']})
++ # argument trumps config file
++ with self._qsl_sep_config('~'):
++ result = urlparse.parse_qs('a=1$b=2~c=3', separator='$')
++ self.assertEqual(result, {'a': ['1'], 'b': ['2~c=3']})
++ # argument trumps env variable
++ with EnvironmentVarGuard() as environ:
++ environ['PYTHON_URLLIB_QS_SEPARATOR'] = '~'
++ result = urlparse.parse_qs('a=1$b=2~c=3', separator='$')
++ self.assertEqual(result, {'a': ['1'], 'b': ['2~c=3']})
++
+ def test_urlsplit_normalization(self):
+ # Certain characters should never occur in the netloc,
+ # including under normalization.
diff --git a/lang/python27/patches/patch-Lib_urllib2.py b/lang/python27/patches/patch-Lib_urllib2.py
index 709a6d15845..68f91c40ca4 100644
--- a/lang/python27/patches/patch-Lib_urllib2.py
+++ b/lang/python27/patches/patch-Lib_urllib2.py
@@ -1,10 +1,15 @@
-$NetBSD: patch-Lib_urllib2.py,v 1.2 2020/09/20 12:10:27 mgorny Exp $
+$NetBSD: patch-Lib_urllib2.py,v 1.3 2021/10/10 03:00:59 gutteridge Exp $
bpo-39503 (CVE-2020-8492): ReDoS on AbstractBasicAuthHandler
-taken from:
+Taken from:
https://gitweb.gentoo.org/fork/cpython.git/commit/?h=gentoo-2.7-vanilla&id=2273e65e11dd0234f2f51ebaef61fc6e848d4059
+bpo-43075 (CVE-2021-3733): Fix ReDoS in request
+
+Taken from:
+https://github.com/python/cpython/pull/24391/
+
--- Lib/urllib2.py.orig 2020-04-19 21:13:39.000000000 +0000
+++ Lib/urllib2.py
@@ -856,8 +856,15 @@ class AbstractBasicAuthHandler:
@@ -15,7 +20,7 @@ https://gitweb.gentoo.org/fork/cpython.git/commit/?h=gentoo-2.7-vanilla&id=2273e
- 'realm=(["\']?)([^"\']*)\\2', re.I)
+ rx = re.compile('(?:^|,)' # start of the string or ','
+ '[ \t]*' # optional whitespaces
-+ '([^ \t]+)' # scheme like "Basic"
++ '([^ \t,]+)' # scheme like "Basic"
+ '[ \t]+' # mandatory whitespaces
+ # realm=xxx
+ # realm='xxx'
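The rewritten pattern anchors each challenge at the start of the header value or at a ',' and excludes ',' from the scheme group, so crafted WWW-Authenticate headers can no longer trigger catastrophic backtracking. A sketch of the full expression (reconstructed from the hunk above and the upstream pull request, not copied verbatim from the patch):

# Illustration only: roughly the tightened AbstractBasicAuthHandler regex.
import re

rx = re.compile('(?:^|,)'       # start of the string or ','
                '[ \t]*'        # optional whitespace
                '([^ \t,]+)'    # scheme like "Basic" (',' now excluded)
                '[ \t]+'        # mandatory whitespace
                'realm=(["\']?)([^"\']*)\\2', re.I)

m = rx.search('Basic realm="NetBSD packages"')
print(m.group(1))   # Basic
print(m.group(3))   # NetBSD packages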
diff --git a/lang/python27/patches/patch-Lib_urlparse.py b/lang/python27/patches/patch-Lib_urlparse.py
new file mode 100644
index 00000000000..03bcd74422c
--- /dev/null
+++ b/lang/python27/patches/patch-Lib_urlparse.py
@@ -0,0 +1,127 @@
+$NetBSD: patch-Lib_urlparse.py,v 1.1 2021/10/10 03:00:59 gutteridge Exp $
+
+Fix CVE-2021-23336: Add `separator` argument to parse_qs; warn with default
+Via Fedora:
+https://src.fedoraproject.org/rpms/python2.7/blob/rawhide/f/00359-CVE-2021-23336.patch
+
+--- Lib/urlparse.py.orig 2020-04-19 21:13:39.000000000 +0000
++++ Lib/urlparse.py
+@@ -29,6 +29,7 @@ test_urlparse.py provides a good indicat
+ """
+
+ import re
++import os
+
+ __all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
+ "urlsplit", "urlunsplit", "parse_qs", "parse_qsl"]
+@@ -382,7 +383,8 @@ def unquote(s):
+ append(item)
+ return ''.join(res)
+
+-def parse_qs(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None):
++def parse_qs(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None,
++ separator=None):
+ """Parse a query given as a string argument.
+
+ Arguments:
+@@ -405,14 +407,23 @@ def parse_qs(qs, keep_blank_values=0, st
+ """
+ dict = {}
+ for name, value in parse_qsl(qs, keep_blank_values, strict_parsing,
+- max_num_fields):
++ max_num_fields, separator):
+ if name in dict:
+ dict[name].append(value)
+ else:
+ dict[name] = [value]
+ return dict
+
+-def parse_qsl(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None):
++class _QueryStringSeparatorWarning(RuntimeWarning):
++ """Warning for using default `separator` in parse_qs or parse_qsl"""
++
++# The default "separator" for parse_qsl can be specified in a config file.
++# It's cached after first read.
++#_QS_SEPARATOR_CONFIG_FILENAME = '/etc/python/urllib.cfg'
++_default_qs_separator = None
++
++def parse_qsl(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None,
++ separator=None):
+ """Parse a query given as a string argument.
+
+ Arguments:
+@@ -434,15 +445,72 @@ def parse_qsl(qs, keep_blank_values=0, s
+
+ Returns a list, as G-d intended.
+ """
++
++ if (not separator or (not isinstance(separator, (str, bytes)))) and separator is not None:
++ raise ValueError("Separator must be of type string or bytes.")
++
++ # Used when both "&" and ";" act as separators. (Need a non-string value.)
++ _legacy = object()
++
++ if separator is None:
++ global _default_qs_separator
++ separator = _default_qs_separator
++ envvar_name = 'PYTHON_URLLIB_QS_SEPARATOR'
++ if separator is None:
++ # Set default separator from environment variable
++ separator = os.environ.get(envvar_name)
++ config_source = 'environment variable'
++ if separator is None:
++ # Set default separator from the configuration file
++ try:
++ file = open(_QS_SEPARATOR_CONFIG_FILENAME)
++ except EnvironmentError:
++ pass
++ else:
++ with file:
++ import ConfigParser
++ config = ConfigParser.ConfigParser()
++ config.readfp(file)
++ separator = config.get('parse_qs', envvar_name)
++ _default_qs_separator = separator
++ config_source = _QS_SEPARATOR_CONFIG_FILENAME
++ if separator is None:
++ # The default is '&', but warn if not specified explicitly
++ if ';' in qs:
++ from warnings import warn
++ warn("The default separator of urlparse.parse_qsl and "
++ + "parse_qs was changed to '&' to avoid a web cache "
++ + "poisoning issue (CVE-2021-23336). "
++ + "By default, semicolons no longer act as query field "
++ + "separators. "
++ + "See https://access.redhat.com/articles/5860431 for "
++ + "more details.",
++ _QueryStringSeparatorWarning, stacklevel=2)
++ separator = '&'
++ elif separator == 'legacy':
++ separator = _legacy
++ elif len(separator) != 1:
++ raise ValueError(
++ '{} (from {}) must contain '.format(envvar_name, config_source)
++ + '1 character, or "legacy". See '
++ + 'https://access.redhat.com/articles/5860431 for more details.'
++ )
++
+ # If max_num_fields is defined then check that the number of fields
+ # is less than max_num_fields. This prevents a memory exhaustion DOS
+ # attack via post bodies with many fields.
+ if max_num_fields is not None:
+- num_fields = 1 + qs.count('&') + qs.count(';')
++ if separator is _legacy:
++ num_fields = 1 + qs.count('&') + qs.count(';')
++ else:
++ num_fields = 1 + qs.count(separator)
+ if max_num_fields < num_fields:
+ raise ValueError('Max number of fields exceeded')
+
+- pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
++ if separator is _legacy:
++ pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
++ else:
++ pairs = [s1 for s1 in qs.split(separator)]
+ r = []
+ for name_value in pairs:
+ if not name_value and not strict_parsing:
diff --git a/lang/python27/patches/patch-Modules___ctypes_callproc.c b/lang/python27/patches/patch-Modules___ctypes_callproc.c
index 565eda85607..7f0c66227bb 100644
--- a/lang/python27/patches/patch-Modules___ctypes_callproc.c
+++ b/lang/python27/patches/patch-Modules___ctypes_callproc.c
@@ -1,8 +1,12 @@
-$NetBSD: patch-Modules___ctypes_callproc.c,v 1.1 2021/06/23 18:30:24 schmonz Exp $
+$NetBSD: patch-Modules___ctypes_callproc.c,v 1.2 2021/10/10 03:00:59 gutteridge Exp $
macOS arm64 support, via MacPorts.
---- Modules/_ctypes/callproc.c.orig 2021-06-22 19:20:28.000000000 +0000
+Fix CVE-2021-3177: Replace snprintf with Python unicode formatting in ctypes param reprs
+Via Fedora:
+https://src.fedoraproject.org/rpms/python2.7/blob/rawhide/f/00357-CVE-2021-3177.patch
+
+--- Modules/_ctypes/callproc.c.orig 2020-04-19 21:13:39.000000000 +0000
+++ Modules/_ctypes/callproc.c
@@ -74,6 +74,10 @@
#include <malloc.h>
@@ -15,7 +19,112 @@ macOS arm64 support, via MacPorts.
#include <ffi.h>
#include "ctypes.h"
#ifdef HAVE_ALLOCA_H
-@@ -773,7 +777,8 @@ static int _call_function_pointer(int fl
+@@ -460,50 +464,62 @@ PyCArg_dealloc(PyCArgObject *self)
+ static PyObject *
+ PyCArg_repr(PyCArgObject *self)
+ {
+- char buffer[256];
+ switch(self->tag) {
+ case 'b':
+ case 'B':
+- sprintf(buffer, "<cparam '%c' (%d)>",
++ return PyString_FromFormat("<cparam '%c' (%d)>",
+ self->tag, self->value.b);
+- break;
+ case 'h':
+ case 'H':
+- sprintf(buffer, "<cparam '%c' (%d)>",
++ return PyString_FromFormat("<cparam '%c' (%d)>",
+ self->tag, self->value.h);
+- break;
+ case 'i':
+ case 'I':
+- sprintf(buffer, "<cparam '%c' (%d)>",
++ return PyString_FromFormat("<cparam '%c' (%d)>",
+ self->tag, self->value.i);
+- break;
+ case 'l':
+ case 'L':
+- sprintf(buffer, "<cparam '%c' (%ld)>",
++ return PyString_FromFormat("<cparam '%c' (%ld)>",
+ self->tag, self->value.l);
+- break;
+
+ #ifdef HAVE_LONG_LONG
+ case 'q':
+ case 'Q':
+- sprintf(buffer,
+- "<cparam '%c' (%" PY_FORMAT_LONG_LONG "d)>",
++ return PyString_FromFormat("<cparam '%c' (%lld)>",
+ self->tag, self->value.q);
+- break;
+ #endif
+ case 'd':
+- sprintf(buffer, "<cparam '%c' (%f)>",
+- self->tag, self->value.d);
+- break;
+- case 'f':
+- sprintf(buffer, "<cparam '%c' (%f)>",
+- self->tag, self->value.f);
+- break;
+-
++ case 'f': {
++ PyObject *s = PyString_FromFormat("<cparam '%c' (", self->tag);
++ if (s == NULL) {
++ return NULL;
++ }
++ PyObject *f = PyFloat_FromDouble((self->tag == 'f') ? self->value.f : self->value.d);
++ if (f == NULL) {
++ Py_DECREF(s);
++ return NULL;
++ }
++ PyObject *r = PyObject_Repr(f);
++ Py_DECREF(f);
++ if (r == NULL) {
++ Py_DECREF(s);
++ return NULL;
++ }
++ PyString_ConcatAndDel(&s, r);
++ if (s == NULL) {
++ return NULL;
++ }
++ r = PyString_FromString(")>");
++ if (r == NULL) {
++ Py_DECREF(s);
++ return NULL;
++ }
++ PyString_ConcatAndDel(&s, r);
++ return s;
++ }
+ case 'c':
+- sprintf(buffer, "<cparam '%c' (%c)>",
++ return PyString_FromFormat("<cparam '%c' ('%c')>",
+ self->tag, self->value.c);
+- break;
+
+ /* Hm, are these 'z' and 'Z' codes useful at all?
+ Shouldn't they be replaced by the functionality of c_string
+@@ -512,16 +528,13 @@ PyCArg_repr(PyCArgObject *self)
+ case 'z':
+ case 'Z':
+ case 'P':
+- sprintf(buffer, "<cparam '%c' (%p)>",
++ return PyUnicode_FromFormat("<cparam '%c' (%p)>",
+ self->tag, self->value.p);
+- break;
+
+ default:
+- sprintf(buffer, "<cparam '%c' at %p>",
+- self->tag, self);
+- break;
++ return PyString_FromFormat("<cparam '%c' at %p>",
++ (unsigned char)self->tag, (void *)self);
+ }
+- return PyString_FromString(buffer);
+ }
+
+ static PyMemberDef PyCArgType_members[] = {
+@@ -773,7 +786,8 @@ static int _call_function_pointer(int fl
ffi_type **atypes,
ffi_type *restype,
void *resmem,
@@ -25,7 +134,7 @@ macOS arm64 support, via MacPorts.
{
#ifdef WITH_THREAD
PyThreadState *_save = NULL; /* For Py_BLOCK_THREADS and Py_UNBLOCK_THREADS */
-@@ -801,6 +806,37 @@ static int _call_function_pointer(int fl
+@@ -801,6 +815,37 @@ static int _call_function_pointer(int fl
if ((flags & FUNCFLAG_CDECL) == 0)
cc = FFI_STDCALL;
#endif
@@ -63,7 +172,7 @@ macOS arm64 support, via MacPorts.
if (FFI_OK != ffi_prep_cif(&cif,
cc,
argcount,
-@@ -810,6 +846,7 @@ static int _call_function_pointer(int fl
+@@ -810,6 +855,7 @@ static int _call_function_pointer(int fl
"ffi_prep_cif failed");
return -1;
}
@@ -71,7 +180,7 @@ macOS arm64 support, via MacPorts.
if (flags & (FUNCFLAG_USE_ERRNO | FUNCFLAG_USE_LASTERROR)) {
error_object = _ctypes_get_errobj(&space);
-@@ -1183,6 +1220,9 @@ PyObject *_ctypes_callproc(PPROC pProc,
+@@ -1183,6 +1229,9 @@ PyObject *_ctypes_callproc(PPROC pProc,
rtype, resbuf,
Py_SAFE_DOWNCAST(argcount,
Py_ssize_t,
@@ -81,7 +190,7 @@ macOS arm64 support, via MacPorts.
int)))
goto cleanup;
-@@ -1416,6 +1456,25 @@ copy_com_pointer(PyObject *self, PyObjec
+@@ -1416,6 +1465,25 @@ copy_com_pointer(PyObject *self, PyObjec
}
#else
@@ -107,7 +216,7 @@ macOS arm64 support, via MacPorts.
static PyObject *py_dl_open(PyObject *self, PyObject *args)
{
char *name;
-@@ -1940,6 +1999,9 @@ PyMethodDef _ctypes_module_methods[] = {
+@@ -1940,6 +2008,9 @@ PyMethodDef _ctypes_module_methods[] = {
"dlopen(name, flag={RTLD_GLOBAL|RTLD_LOCAL}) open a shared library"},
{"dlclose", py_dl_close, METH_VARARGS, "dlclose a library"},
{"dlsym", py_dl_sym, METH_VARARGS, "find symbol in shared library"},