author     Igor Pashev <igor.pashev@nexenta.com>    2013-04-17 14:08:01 +0400
committer  Igor Pashev <igor.pashev@nexenta.com>    2013-04-17 14:08:01 +0400
commit     207cd347945b53a1034d0ed8ad35037cf497e471 (patch)
tree       455f399a5c405141b9c2fe500bcf9be05534d304 /examples/python2.7/patches
parent     62c2114fcab6cf9826089109633117494ace630e (diff)
download   cibs-207cd347945b53a1034d0ed8ad35037cf497e471.tar.gz
Examples: vim & python
Diffstat (limited to 'examples/python2.7/patches')
-rw-r--r--  examples/python2.7/patches/apport-support.dpatch  42
-rw-r--r--  examples/python2.7/patches/arm-float.dpatch  119
-rw-r--r--  examples/python2.7/patches/bdist-wininst-notfound.diff  17
-rw-r--r--  examples/python2.7/patches/bsddb-libpath.diff  19
-rw-r--r--  examples/python2.7/patches/ctypes-arm.diff  32
-rw-r--r--  examples/python2.7/patches/db5.1.diff  1768
-rw-r--r--  examples/python2.7/patches/deb-locations.diff  63
-rw-r--r--  examples/python2.7/patches/deb-setup.diff  17
-rw-r--r--  examples/python2.7/patches/debug-build.diff  228
-rw-r--r--  examples/python2.7/patches/disable-sem-check.diff  28
-rw-r--r--  examples/python2.7/patches/disable-ssl-cert-tests.diff  75
-rw-r--r--  examples/python2.7/patches/distutils-install-layout.diff  380
-rw-r--r--  examples/python2.7/patches/distutils-link.diff  18
-rw-r--r--  examples/python2.7/patches/distutils-sysconfig.diff  33
-rw-r--r--  examples/python2.7/patches/do-not-italicize-punctuation.patch  50
-rw-r--r--  examples/python2.7/patches/doc-faq.dpatch  54
-rw-r--r--  examples/python2.7/patches/doc-nodownload.diff  13
-rw-r--r--  examples/python2.7/patches/enable-fpectl.diff  14
-rw-r--r--  examples/python2.7/patches/hg-updates.diff  27402
-rw-r--r--  examples/python2.7/patches/hotshot-import.diff  17
-rw-r--r--  examples/python2.7/patches/hurd-broken-poll.diff  23
-rw-r--r--  examples/python2.7/patches/hurd-disable-nonworking-constants.diff  34
-rw-r--r--  examples/python2.7/patches/issue15340.diff  16
-rw-r--r--  examples/python2.7/patches/issue15847.diff  47
-rw-r--r--  examples/python2.7/patches/issue9012a.diff  13
-rw-r--r--  examples/python2.7/patches/issue9189.diff  342
-rw-r--r--  examples/python2.7/patches/issue9374-followup.diff  30
-rw-r--r--  examples/python2.7/patches/linecache.diff  16
-rw-r--r--  examples/python2.7/patches/link-system-expat.diff  22
-rw-r--r--  examples/python2.7/patches/link-whole-archive.diff  13
-rw-r--r--  examples/python2.7/patches/locale-module.diff  17
-rw-r--r--  examples/python2.7/patches/makeflags.diff  24
-rw-r--r--  examples/python2.7/patches/makesetup-bashism.diff  13
-rw-r--r--  examples/python2.7/patches/multiprocessing-typos.diff  24
-rw-r--r--  examples/python2.7/patches/ncursesw-incdir.diff  66
-rw-r--r--  examples/python2.7/patches/no-zip-on-sys.path.diff  52
-rw-r--r--  examples/python2.7/patches/plat-gnukfreebsd.diff  2478
-rw-r--r--  examples/python2.7/patches/platform-lsbrelease.diff  41
-rw-r--r--  examples/python2.7/patches/profiled-build.diff  27
-rw-r--r--  examples/python2.7/patches/pydebug-path.dpatch  100
-rw-r--r--  examples/python2.7/patches/revert-issue14635.diff  294
-rw-r--r--  examples/python2.7/patches/revert-sizeof-methods.diff  1062
-rw-r--r--  examples/python2.7/patches/series  46
-rw-r--r--  examples/python2.7/patches/setup-modules-heapq.diff  12
-rw-r--r--  examples/python2.7/patches/setup-modules-ssl.diff  24
-rw-r--r--  examples/python2.7/patches/setup-modules.dpatch  75
-rw-r--r--  examples/python2.7/patches/site-locations.diff  32
-rw-r--r--  examples/python2.7/patches/sqlite-rpath.diff  10
-rw-r--r--  examples/python2.7/patches/statvfs-f_flag-constants.diff  57
-rw-r--r--  examples/python2.7/patches/subprocess-eintr-safety.dpatch  81
-rw-r--r--  examples/python2.7/patches/sysconfigdata.diff  91
-rw-r--r--  examples/python2.7/patches/test-sundry.diff  17
-rw-r--r--  examples/python2.7/patches/tkinter-import.diff  16
-rw-r--r--  examples/python2.7/patches/webbrowser.diff  27
-rw-r--r--  examples/python2.7/patches/xdg-gvfs-open.diff  17
55 files changed, 35648 insertions, 0 deletions
diff --git a/examples/python2.7/patches/apport-support.dpatch b/examples/python2.7/patches/apport-support.dpatch
new file mode 100644
index 0000000..a04ca90
--- /dev/null
+++ b/examples/python2.7/patches/apport-support.dpatch
@@ -0,0 +1,42 @@
+#! /bin/sh -e
+
+dir=
+if [ $# -eq 3 -a "$2" = '-d' ]; then
+ pdir="-d $3"
+ dir="$3/"
+elif [ $# -ne 1 ]; then
+ echo >&2 "usage: `basename $0`: -patch|-unpatch [-d <srcdir>]"
+ exit 1
+fi
+case "$1" in
+ -patch)
+ patch $pdir -f --no-backup-if-mismatch -p0 < $0
+ #cd ${dir}gcc && autoconf
+ ;;
+ -unpatch)
+ patch $pdir -f --no-backup-if-mismatch -R -p0 < $0
+ #rm ${dir}gcc/configure
+ ;;
+ *)
+ echo >&2 "usage: `basename $0`: -patch|-unpatch [-d <srcdir>]"
+ exit 1
+esac
+exit 0
+
+--- Lib/site.py 2004-07-20 12:28:28.000000000 +1000
++++ Lib/site.py 2006-11-09 09:28:32.000000000 +1100
+@@ -393,6 +393,14 @@
+ # this module is run as a script, because this code is executed twice.
+ if hasattr(sys, "setdefaultencoding"):
+ del sys.setdefaultencoding
++ # install the apport exception handler if available
++ try:
++ import apport_python_hook
++ except ImportError:
++ pass
++ else:
++ apport_python_hook.install()
++
+
+ main()
+
diff --git a/examples/python2.7/patches/arm-float.dpatch b/examples/python2.7/patches/arm-float.dpatch
new file mode 100644
index 0000000..e72e9e3
--- /dev/null
+++ b/examples/python2.7/patches/arm-float.dpatch
@@ -0,0 +1,119 @@
+#! /bin/sh -e
+
+# DP: Support mixed-endian IEEE floating point, as found in the ARM old-ABI.
+
+dir=
+if [ $# -eq 3 -a "$2" = '-d' ]; then
+ pdir="-d $3"
+ dir="$3/"
+elif [ $# -ne 1 ]; then
+ echo >&2 "usage: `basename $0`: -patch|-unpatch [-d <srcdir>]"
+ exit 1
+fi
+case "$1" in
+ -patch)
+ patch $pdir -f --no-backup-if-mismatch -p0 < $0
+ ;;
+ -unpatch)
+ patch $pdir -f --no-backup-if-mismatch -R -p0 < $0
+ ;;
+ *)
+ echo >&2 "usage: `basename $0`: -patch|-unpatch [-d <srcdir>]"
+ exit 1
+esac
+exit 0
+
+--- Objects/floatobject.c.orig 2006-05-25 10:53:30.000000000 -0500
++++ Objects/floatobject.c 2007-07-27 06:43:15.000000000 -0500
+@@ -982,7 +982,7 @@
+ /* this is for the benefit of the pack/unpack routines below */
+
+ typedef enum {
+- unknown_format, ieee_big_endian_format, ieee_little_endian_format
++ unknown_format, ieee_big_endian_format, ieee_little_endian_format, ieee_mixed_endian_format
+ } float_format_type;
+
+ static float_format_type double_format, float_format;
+@@ -1021,6 +1021,8 @@
+ return PyString_FromString("IEEE, little-endian");
+ case ieee_big_endian_format:
+ return PyString_FromString("IEEE, big-endian");
++ case ieee_mixed_endian_format:
++ return PyString_FromString("IEEE, mixed-endian");
+ default:
+ Py_FatalError("insane float_format or double_format");
+ return NULL;
+@@ -1073,11 +1075,14 @@
+ else if (strcmp(format, "IEEE, big-endian") == 0) {
+ f = ieee_big_endian_format;
+ }
++ else if (strcmp(format, "IEEE, mixed-endian") == 0) {
++ f = ieee_mixed_endian_format;
++ }
+ else {
+ PyErr_SetString(PyExc_ValueError,
+ "__setformat__() argument 2 must be "
+- "'unknown', 'IEEE, little-endian' or "
+- "'IEEE, big-endian'");
++ "'unknown', 'IEEE, little-endian', "
++ "'IEEE, big-endian' or 'IEEE, mixed-endian'");
+ return NULL;
+
+ }
+@@ -1230,6 +1235,8 @@
+ detected_double_format = ieee_big_endian_format;
+ else if (memcmp(&x, "\x05\x04\x03\x02\x01\xff\x3f\x43", 8) == 0)
+ detected_double_format = ieee_little_endian_format;
++ else if (memcmp(&x, "\x01\xff\x3f\x43\x05\x04\x03\x02", 8) == 0)
++ detected_double_format = ieee_mixed_endian_format;
+ else
+ detected_double_format = unknown_format;
+ }
+@@ -1565,8 +1572,19 @@
+ p += 7;
+ incr = -1;
+ }
++ else if (double_format == ieee_mixed_endian_format) {
++ if (le)
++ p += 4;
++ else {
++ p += 3;
++ incr = -1;
++ }
++ }
+
+ for (i = 0; i < 8; i++) {
++ if (double_format == ieee_mixed_endian_format && i == 4)
++ p += -8 * incr;
++
+ *p = *s++;
+ p += incr;
+ }
+@@ -1739,6 +1757,27 @@
+ }
+ memcpy(&x, buf, 8);
+ }
++ else if (double_format == ieee_mixed_endian_format) {
++ char buf[8];
++ char *d;
++ int i, incr = 1;
++
++ if (le)
++ d = &buf[4];
++ else
++ d = &buf[3];
++
++ for (i = 0; i < 4; i++) {
++ *d = *p++;
++ d += incr;
++ }
++ d += -8 * incr;
++ for (i = 0; i < 4; i++) {
++ *d = *p++;
++ d += incr;
++ }
++ memcpy(&x, buf, 8);
++ }
+ else {
+ memcpy(&x, p, 8);
+ }
diff --git a/examples/python2.7/patches/bdist-wininst-notfound.diff b/examples/python2.7/patches/bdist-wininst-notfound.diff
new file mode 100644
index 0000000..5b72bb1
--- /dev/null
+++ b/examples/python2.7/patches/bdist-wininst-notfound.diff
@@ -0,0 +1,17 @@
+# DP: the wininst-* files cannot be built within Debian, needing a
+# DP: zlib mingw build, which the zlib maintainer isn't going to provide.
+
+--- a/Lib/distutils/command/bdist_wininst.py
++++ b/Lib/distutils/command/bdist_wininst.py
+@@ -360,7 +360,10 @@
+ sfix = ''
+
+ filename = os.path.join(directory, "wininst-%.1f%s.exe" % (bv, sfix))
+- f = open(filename, "rb")
++ try:
++ f = open(filename, "rb")
++ except IOError, msg:
++ raise DistutilsFileError, str(msg) + ', %s not included in the Debian packages.' % filename
+ try:
+ return f.read()
+ finally:
diff --git a/examples/python2.7/patches/bsddb-libpath.diff b/examples/python2.7/patches/bsddb-libpath.diff
new file mode 100644
index 0000000..2dcb6f0
--- /dev/null
+++ b/examples/python2.7/patches/bsddb-libpath.diff
@@ -0,0 +1,19 @@
+# DP: Don't add the bsddb multilib path, if already in the standard lib path
+
+--- a/setup.py
++++ b/setup.py
+@@ -977,7 +977,13 @@
+ if db_setup_debug:
+ print "bsddb using BerkeleyDB lib:", db_ver, dblib
+ print "bsddb lib dir:", dblib_dir, " inc dir:", db_incdir
+- db_incs = [db_incdir]
++ # only add db_incdir/dblib_dir if not in the standard paths
++ if db_incdir in inc_dirs:
++ db_incs = []
++ else:
++ db_incs = [db_incdir]
++ if dblib_dir[0] in lib_dirs:
++ dblib_dir = []
+ dblibs = [dblib]
+ # We add the runtime_library_dirs argument because the
+ # BerkeleyDB lib we're linking against often isn't in the
diff --git a/examples/python2.7/patches/ctypes-arm.diff b/examples/python2.7/patches/ctypes-arm.diff
new file mode 100644
index 0000000..655998f
--- /dev/null
+++ b/examples/python2.7/patches/ctypes-arm.diff
@@ -0,0 +1,32 @@
+--- a/Lib/ctypes/util.py
++++ b/Lib/ctypes/util.py
+@@ -206,16 +206,27 @@
+
+ def _findSoname_ldconfig(name):
+ import struct
++ # XXX this code assumes that we know all unames and that a single
++ # ABI is supported per uname; instead we should find what the
++ # ABI is (e.g. check ABI of current process) or simply ask libc
++ # to load the library for us
++ uname = os.uname()[4]
++ # ARM has a variety of unames, e.g. armv7l
++ if uname.startswith("arm"):
++ uname = "arm"
+ if struct.calcsize('l') == 4:
+- machine = os.uname()[4] + '-32'
++ machine = uname + '-32'
+ else:
+- machine = os.uname()[4] + '-64'
++ machine = uname + '-64'
+ mach_map = {
+ 'x86_64-64': 'libc6,x86-64',
+ 'ppc64-64': 'libc6,64bit',
+ 'sparc64-64': 'libc6,64bit',
+ 's390x-64': 'libc6,64bit',
+ 'ia64-64': 'libc6,IA-64',
++ # this actually breaks on biarch or multiarch as the first
++ # library wins; uname doesn't tell us which ABI we're using
++ 'arm-32': 'libc6(,hard-float)?',
+ }
+ abi_type = mach_map.get(machine, 'libc6')
+
diff --git a/examples/python2.7/patches/db5.1.diff b/examples/python2.7/patches/db5.1.diff
new file mode 100644
index 0000000..8fdeb66
--- /dev/null
+++ b/examples/python2.7/patches/db5.1.diff
@@ -0,0 +1,1768 @@
+Index: b/setup.py
+===================================================================
+--- a/setup.py
++++ b/setup.py
+@@ -799,7 +799,7 @@
+ # a release. Most open source OSes come with one or more
+ # versions of BerkeleyDB already installed.
+
+- max_db_ver = (4, 8)
++ max_db_ver = (5, 1)
+ min_db_ver = (4, 1)
+ db_setup_debug = False # verbose debug prints from this script?
+
+@@ -821,7 +821,11 @@
+ return True
+
+ def gen_db_minor_ver_nums(major):
+- if major == 4:
++ if major == 5:
++ for x in range(max_db_ver[1]+1):
++ if allow_db_ver((5, x)):
++ yield x
++ elif major == 4:
+ for x in range(max_db_ver[1]+1):
+ if allow_db_ver((4, x)):
+ yield x
+Index: b/Lib/bsddb/__init__.py
+===================================================================
+--- a/Lib/bsddb/__init__.py
++++ b/Lib/bsddb/__init__.py
+@@ -33,7 +33,7 @@
+ #----------------------------------------------------------------------
+
+
+-"""Support for Berkeley DB 4.1 through 4.8 with a simple interface.
++"""Support for Berkeley DB 4.2 through 5.1 with a simple interface.
+
+ For the full featured object oriented interface use the bsddb.db module
+ instead. It mirrors the Oracle Berkeley DB C API.
+Index: b/Lib/bsddb/test/test_all.py
+===================================================================
+--- a/Lib/bsddb/test/test_all.py
++++ b/Lib/bsddb/test/test_all.py
+@@ -484,6 +484,8 @@
+ print '-=' * 38
+ print db.DB_VERSION_STRING
+ print 'bsddb.db.version(): %s' % (db.version(), )
++ if db.version() >= (5, 0) :
++ print 'bsddb.db.full_version(): %s' %repr(db.full_version())
+ print 'bsddb.db.__version__: %s' % db.__version__
+ print 'bsddb.db.cvsid: %s' % db.cvsid
+
+@@ -528,7 +530,8 @@
+
+ # This path can be overriden via "set_test_path_prefix()".
+ import os, os.path
+-get_new_path.prefix=os.path.join(os.sep,"tmp","z-Berkeley_DB")
++get_new_path.prefix=os.path.join(os.environ.get("TMPDIR",
++ os.path.join(os.sep,"tmp")), "z-Berkeley_DB")
+ get_new_path.num=0
+
+ def get_test_path_prefix() :
+Index: b/Lib/bsddb/test/test_associate.py
+===================================================================
+--- a/Lib/bsddb/test/test_associate.py
++++ b/Lib/bsddb/test/test_associate.py
+@@ -76,6 +76,11 @@
+ #----------------------------------------------------------------------
+
+ class AssociateErrorTestCase(unittest.TestCase):
++ import sys
++ if sys.version_info < (2, 4) :
++ def assertTrue(self, expr, msg=None):
++ self.failUnless(expr,msg=msg)
++
+ def setUp(self):
+ self.filename = self.__class__.__name__ + '.db'
+ self.homeDir = get_new_environment_path()
+@@ -120,6 +125,11 @@
+
+
+ class AssociateTestCase(unittest.TestCase):
++ import sys
++ if sys.version_info < (2, 4) :
++ def assertTrue(self, expr, msg=None):
++ self.failUnless(expr,msg=msg)
++
+ keytype = ''
+ envFlags = 0
+ dbFlags = 0
+Index: b/Lib/bsddb/test/test_basics.py
+===================================================================
+--- a/Lib/bsddb/test/test_basics.py
++++ b/Lib/bsddb/test/test_basics.py
+@@ -74,7 +74,6 @@
+ # create and open the DB
+ self.d = db.DB(self.env)
+ if not self.useEnv :
+- if db.version() >= (4, 2) :
+ self.d.set_cachesize(*self.cachesize)
+ cachesize = self.d.get_cachesize()
+ self.assertEqual(cachesize[0], self.cachesize[0])
+@@ -792,7 +791,6 @@
+ for log in logs:
+ if verbose:
+ print 'log file: ' + log
+- if db.version() >= (4,2):
+ logs = self.env.log_archive(db.DB_ARCH_REMOVE)
+ self.assertTrue(not logs)
+
+@@ -875,7 +873,6 @@
+
+ #----------------------------------------
+
+- if db.version() >= (4, 2) :
+ def test_get_tx_max(self) :
+ self.assertEqual(self.env.get_tx_max(), 30)
+
+Index: b/Lib/bsddb/test/test_compat.py
+===================================================================
+--- a/Lib/bsddb/test/test_compat.py
++++ b/Lib/bsddb/test/test_compat.py
+@@ -11,6 +11,11 @@
+
+
+ class CompatibilityTestCase(unittest.TestCase):
++ import sys
++ if sys.version_info < (2, 4) :
++ def assertTrue(self, expr, msg=None):
++ self.failUnless(expr,msg=msg)
++
+ def setUp(self):
+ self.filename = get_new_database_path()
+
+Index: b/Lib/bsddb/test/test_db.py
+===================================================================
+--- a/Lib/bsddb/test/test_db.py
++++ b/Lib/bsddb/test/test_db.py
+@@ -11,6 +11,8 @@
+ if sys.version_info < (2, 4) :
+ def assertTrue(self, expr, msg=None):
+ self.failUnless(expr,msg=msg)
++ def assertFalse(self, expr, msg=None):
++ self.failIf(expr,msg=msg)
+
+ def setUp(self):
+ self.path = get_new_database_path()
+@@ -19,10 +21,28 @@
+ def tearDown(self):
+ self.db.close()
+ del self.db
+- test_support.rmtree(self.path)
++ test_support.unlink(self.path)
+
+ class DB_general(DB) :
+- if db.version() >= (4, 2) :
++ def test_get_open_flags(self) :
++ self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE)
++ self.assertEqual(db.DB_CREATE, self.db.get_open_flags())
++
++ def test_get_open_flags2(self) :
++ self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE |
++ db.DB_THREAD)
++ self.assertEqual(db.DB_CREATE | db.DB_THREAD, self.db.get_open_flags())
++
++ def test_get_dbname_filename(self) :
++ self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE)
++ self.assertEqual((self.path, None), self.db.get_dbname())
++
++ def test_get_dbname_filename_database(self) :
++ name = "jcea-random-name"
++ self.db.open(self.path, dbname=name, dbtype=db.DB_HASH,
++ flags = db.DB_CREATE)
++ self.assertEqual((self.path, name), self.db.get_dbname())
++
+ def test_bt_minkey(self) :
+ for i in [17, 108, 1030] :
+ self.db.set_bt_minkey(i)
+@@ -44,8 +64,13 @@
+ self.db.set_priority(flag)
+ self.assertEqual(flag, self.db.get_priority())
+
++ if db.version() >= (4, 3) :
++ def test_get_transactional(self) :
++ self.assertFalse(self.db.get_transactional())
++ self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE)
++ self.assertFalse(self.db.get_transactional())
++
+ class DB_hash(DB) :
+- if db.version() >= (4, 2) :
+ def test_h_ffactor(self) :
+ for ffactor in [4, 16, 256] :
+ self.db.set_h_ffactor(ffactor)
+@@ -84,7 +109,6 @@
+ del self.env
+ test_support.rmtree(self.homeDir)
+
+- if db.version() >= (4, 2) :
+ def test_flags(self) :
+ self.db.set_flags(db.DB_CHKSUM)
+ self.assertEqual(db.DB_CHKSUM, self.db.get_flags())
+@@ -92,8 +116,15 @@
+ self.assertEqual(db.DB_TXN_NOT_DURABLE | db.DB_CHKSUM,
+ self.db.get_flags())
+
++ if db.version() >= (4, 3) :
++ def test_get_transactional(self) :
++ self.assertFalse(self.db.get_transactional())
++ # DB_AUTO_COMMIT = Implicit transaction
++ self.db.open("XXX", dbtype=db.DB_HASH,
++ flags = db.DB_CREATE | db.DB_AUTO_COMMIT)
++ self.assertTrue(self.db.get_transactional())
++
+ class DB_recno(DB) :
+- if db.version() >= (4, 2) :
+ def test_re_pad(self) :
+ for i in [' ', '*'] : # Check chars
+ self.db.set_re_pad(i)
+@@ -116,7 +147,6 @@
+ self.assertEqual(i, self.db.get_re_source())
+
+ class DB_queue(DB) :
+- if db.version() >= (4, 2) :
+ def test_re_len(self) :
+ for i in [33, 65, 300, 2000] :
+ self.db.set_re_len(i)
+Index: b/Lib/bsddb/test/test_dbenv.py
+===================================================================
+--- a/Lib/bsddb/test/test_dbenv.py
++++ b/Lib/bsddb/test/test_dbenv.py
+@@ -25,12 +25,31 @@
+ test_support.rmtree(self.homeDir)
+
+ class DBEnv_general(DBEnv) :
++ def test_get_open_flags(self) :
++ flags = db.DB_CREATE | db.DB_INIT_MPOOL
++ self.env.open(self.homeDir, flags)
++ self.assertEqual(flags, self.env.get_open_flags())
++
++ def test_get_open_flags2(self) :
++ flags = db.DB_CREATE | db.DB_INIT_MPOOL | \
++ db.DB_INIT_LOCK | db.DB_THREAD
++ self.env.open(self.homeDir, flags)
++ self.assertEqual(flags, self.env.get_open_flags())
++
+ if db.version() >= (4, 7) :
+ def test_lk_partitions(self) :
+ for i in [10, 20, 40] :
+ self.env.set_lk_partitions(i)
+ self.assertEqual(i, self.env.get_lk_partitions())
+
++ def test_getset_intermediate_dir_mode(self) :
++ self.assertEqual(None, self.env.get_intermediate_dir_mode())
++ for mode in ["rwx------", "rw-rw-rw-", "rw-r--r--"] :
++ self.env.set_intermediate_dir_mode(mode)
++ self.assertEqual(mode, self.env.get_intermediate_dir_mode())
++ self.assertRaises(db.DBInvalidArgError,
++ self.env.set_intermediate_dir_mode, "abcde")
++
+ if db.version() >= (4, 6) :
+ def test_thread(self) :
+ for i in [16, 100, 1000] :
+@@ -72,7 +91,6 @@
+ v=self.env.get_mp_max_write()
+ self.assertEqual((i, j), v)
+
+- if db.version() >= (4, 2) :
+ def test_invalid_txn(self) :
+ # This environment doesn't support transactions
+ self.assertRaises(db.DBInvalidArgError, self.env.txn_begin)
+@@ -115,7 +133,7 @@
+ self.assertEqual(i, self.env.get_lk_max_lockers())
+
+ def test_lg_regionmax(self) :
+- for i in [128, 256, 1024] :
++ for i in [128, 256, 1000] :
+ i = i*1024*1024
+ self.env.set_lg_regionmax(i)
+ j = self.env.get_lg_regionmax()
+@@ -172,8 +190,12 @@
+ self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
+ cachesize = (0, 2*1024*1024, 1)
+ self.assertRaises(db.DBInvalidArgError,
+- self.env.set_cachesize, *cachesize)
+- self.assertEqual(cachesize2, self.env.get_cachesize())
++ self.env.set_cachesize, *cachesize)
++ cachesize3 = self.env.get_cachesize()
++ self.assertEqual(cachesize2[0], cachesize3[0])
++ self.assertEqual(cachesize2[2], cachesize3[2])
++ # In Berkeley DB 5.1, the cachesize can change when opening the Env
++ self.assertTrue(cachesize2[1] <= cachesize3[1])
+
+ def test_set_cachesize_dbenv_db(self) :
+ # You can not configure the cachesize using
+Index: b/Lib/bsddb/test/test_dbtables.py
+===================================================================
+--- a/Lib/bsddb/test/test_dbtables.py
++++ b/Lib/bsddb/test/test_dbtables.py
+@@ -38,6 +38,11 @@
+ #----------------------------------------------------------------------
+
+ class TableDBTestCase(unittest.TestCase):
++ import sys
++ if sys.version_info < (2, 4) :
++ def assertTrue(self, expr, msg=None):
++ self.failUnless(expr,msg=msg)
++
+ db_name = 'test-table.db'
+
+ def setUp(self):
+Index: b/Lib/bsddb/test/test_distributed_transactions.py
+===================================================================
+--- a/Lib/bsddb/test/test_distributed_transactions.py
++++ b/Lib/bsddb/test/test_distributed_transactions.py
+@@ -19,6 +19,11 @@
+ #----------------------------------------------------------------------
+
+ class DBTxn_distributed(unittest.TestCase):
++ import sys
++ if sys.version_info < (2, 4) :
++ def assertTrue(self, expr, msg=None):
++ self.failUnless(expr,msg=msg)
++
+ num_txns=1234
+ nosync=True
+ must_open_db=False
+@@ -37,15 +42,11 @@
+ self.db = db.DB(self.dbenv)
+ self.db.set_re_len(db.DB_GID_SIZE)
+ if must_open_db :
+- if db.version() >= (4,2) :
+ txn=self.dbenv.txn_begin()
+ self.db.open(self.filename,
+ db.DB_QUEUE, db.DB_CREATE | db.DB_THREAD, 0666,
+ txn=txn)
+ txn.commit()
+- else :
+- self.db.open(self.filename,
+- db.DB_QUEUE, db.DB_CREATE | db.DB_THREAD, 0666)
+
+ def setUp(self) :
+ self.homeDir = get_new_environment_path()
+Index: b/Lib/bsddb/test/test_get_none.py
+===================================================================
+--- a/Lib/bsddb/test/test_get_none.py
++++ b/Lib/bsddb/test/test_get_none.py
+@@ -11,6 +11,11 @@
+ #----------------------------------------------------------------------
+
+ class GetReturnsNoneTestCase(unittest.TestCase):
++ import sys
++ if sys.version_info < (2, 4) :
++ def assertTrue(self, expr, msg=None):
++ self.failUnless(expr,msg=msg)
++
+ def setUp(self):
+ self.filename = get_new_database_path()
+
+Index: b/Lib/bsddb/test/test_join.py
+===================================================================
+--- a/Lib/bsddb/test/test_join.py
++++ b/Lib/bsddb/test/test_join.py
+@@ -30,6 +30,11 @@
+ ]
+
+ class JoinTestCase(unittest.TestCase):
++ import sys
++ if sys.version_info < (2, 4) :
++ def assertTrue(self, expr, msg=None):
++ self.failUnless(expr,msg=msg)
++
+ keytype = ''
+
+ def setUp(self):
+Index: b/Lib/bsddb/test/test_lock.py
+===================================================================
+--- a/Lib/bsddb/test/test_lock.py
++++ b/Lib/bsddb/test/test_lock.py
+@@ -89,7 +89,6 @@
+ for t in threads:
+ t.join()
+
+- if db.version() >= (4, 2) :
+ def test03_lock_timeout(self):
+ self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
+ self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 0)
+Index: b/Lib/bsddb/test/test_misc.py
+===================================================================
+--- a/Lib/bsddb/test/test_misc.py
++++ b/Lib/bsddb/test/test_misc.py
+@@ -97,10 +97,6 @@
+ test_support.unlink(self.filename)
+
+ def test07_DB_set_flags_persists(self):
+- if db.version() < (4,2):
+- # The get_flags API required for this to work is only available
+- # in Berkeley DB >= 4.2
+- return
+ try:
+ db1 = db.DB()
+ db1.set_flags(db.DB_DUPSORT)
+Index: b/Lib/bsddb/test/test_queue.py
+===================================================================
+--- a/Lib/bsddb/test/test_queue.py
++++ b/Lib/bsddb/test/test_queue.py
+@@ -99,11 +99,6 @@
+ print '\n', '-=' * 30
+ print "Running %s.test02_basicPost32..." % self.__class__.__name__
+
+- if db.version() < (3, 2, 0):
+- if verbose:
+- print "Test not run, DB not new enough..."
+- return
+-
+ d = db.DB()
+ d.set_re_len(40) # Queues must be fixed length
+ d.open(self.filename, db.DB_QUEUE, db.DB_CREATE)
+Index: b/Lib/bsddb/test/test_recno.py
+===================================================================
+--- a/Lib/bsddb/test/test_recno.py
++++ b/Lib/bsddb/test/test_recno.py
+@@ -236,7 +236,9 @@
+ d.close()
+
+ # get the text from the backing source
+- text = open(source, 'r').read()
++ f = open(source, 'r')
++ text = f.read()
++ f.close()
+ text = text.strip()
+ if verbose:
+ print text
+@@ -256,7 +258,9 @@
+ d.sync()
+ d.close()
+
+- text = open(source, 'r').read()
++ f = open(source, 'r')
++ text = f.read()
++ f.close()
+ text = text.strip()
+ if verbose:
+ print text
+@@ -298,6 +302,18 @@
+ c.close()
+ d.close()
+
++ def test04_get_size_empty(self) :
++ d = db.DB()
++ d.open(self.filename, dbtype=db.DB_RECNO, flags=db.DB_CREATE)
++
++ row_id = d.append(' ')
++ self.assertEqual(1, d.get_size(key=row_id))
++ row_id = d.append('')
++ self.assertEqual(0, d.get_size(key=row_id))
++
++
++
++
+
+ #----------------------------------------------------------------------
+
+Index: b/Modules/_bsddb.c
+===================================================================
+--- a/Modules/_bsddb.c
++++ b/Modules/_bsddb.c
+@@ -202,9 +202,7 @@
+ static PyObject* DBNoSuchFileError; /* ENOENT */
+ static PyObject* DBPermissionsError; /* EPERM */
+
+-#if (DBVER >= 42)
+ static PyObject* DBRepHandleDeadError; /* DB_REP_HANDLE_DEAD */
+-#endif
+ #if (DBVER >= 44)
+ static PyObject* DBRepLockoutError; /* DB_REP_LOCKOUT */
+ #endif
+@@ -715,9 +713,7 @@
+ case ENOENT: errObj = DBNoSuchFileError; break;
+ case EPERM : errObj = DBPermissionsError; break;
+
+-#if (DBVER >= 42)
+ case DB_REP_HANDLE_DEAD : errObj = DBRepHandleDeadError; break;
+-#endif
+ #if (DBVER >= 44)
+ case DB_REP_LOCKOUT : errObj = DBRepLockoutError; break;
+ #endif
+@@ -2132,7 +2128,7 @@
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->db->get(self->db, txn, &key, &data, flags);
+ MYDB_END_ALLOW_THREADS;
+- if (err == DB_BUFFER_SMALL) {
++ if ((err == DB_BUFFER_SMALL) || (err == 0)) {
+ retval = NUMBER_FromLong((long)data.size);
+ err = 0;
+ }
+@@ -2385,9 +2381,7 @@
+ return NULL;
+ }
+
+-#if (DBVER >= 42)
+ self->db->get_flags(self->db, &self->setflags);
+-#endif
+
+ self->flags = flags;
+
+@@ -2539,6 +2533,37 @@
+ #endif
+
+ static PyObject*
++DB_get_dbname(DBObject* self)
++{
++ int err;
++ const char *filename, *dbname;
++
++ CHECK_DB_NOT_CLOSED(self);
++
++ MYDB_BEGIN_ALLOW_THREADS;
++ err = self->db->get_dbname(self->db, &filename, &dbname);
++ MYDB_END_ALLOW_THREADS;
++ RETURN_IF_ERR();
++ /* If "dbname==NULL", it is correctly converted to "None" */
++ return Py_BuildValue("(ss)", filename, dbname);
++}
++
++static PyObject*
++DB_get_open_flags(DBObject* self)
++{
++ int err;
++ unsigned int flags;
++
++ CHECK_DB_NOT_CLOSED(self);
++
++ MYDB_BEGIN_ALLOW_THREADS;
++ err = self->db->get_open_flags(self->db, &flags);
++ MYDB_END_ALLOW_THREADS;
++ RETURN_IF_ERR();
++ return NUMBER_FromLong(flags);
++}
++
++static PyObject*
+ DB_set_q_extentsize(DBObject* self, PyObject* args)
+ {
+ int err;
+@@ -2555,7 +2580,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DB_get_q_extentsize(DBObject* self)
+ {
+@@ -2570,7 +2594,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(extentsize);
+ }
+-#endif
+
+ static PyObject*
+ DB_set_bt_minkey(DBObject* self, PyObject* args)
+@@ -2588,7 +2611,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DB_get_bt_minkey(DBObject* self)
+ {
+@@ -2603,7 +2625,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(bt_minkey);
+ }
+-#endif
+
+ static int
+ _default_cmp(const DBT *leftKey,
+@@ -2759,7 +2780,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DB_get_cachesize(DBObject* self)
+ {
+@@ -2777,7 +2797,6 @@
+
+ return Py_BuildValue("(iii)", gbytes, bytes, ncache);
+ }
+-#endif
+
+ static PyObject*
+ DB_set_flags(DBObject* self, PyObject* args)
+@@ -2797,7 +2816,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DB_get_flags(DBObject* self)
+ {
+@@ -2812,6 +2830,35 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(flags);
+ }
++
++#if (DBVER >= 43)
++static PyObject*
++DB_get_transactional(DBObject* self)
++{
++ int err;
++
++ CHECK_DB_NOT_CLOSED(self);
++
++ MYDB_BEGIN_ALLOW_THREADS;
++ err = self->db->get_transactional(self->db);
++ MYDB_END_ALLOW_THREADS;
++
++ if(err == 0) {
++ Py_INCREF(Py_False);
++ return Py_False;
++ } else if(err == 1) {
++ Py_INCREF(Py_True);
++ return Py_True;
++ }
++
++ /*
++ ** If we reach there, there was an error. The
++ ** "return" should be unreachable.
++ */
++ RETURN_IF_ERR();
++ assert(0); /* This code SHOULD be unreachable */
++ return NULL;
++}
+ #endif
+
+ static PyObject*
+@@ -2830,7 +2877,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DB_get_h_ffactor(DBObject* self)
+ {
+@@ -2845,7 +2891,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(ffactor);
+ }
+-#endif
+
+ static PyObject*
+ DB_set_h_nelem(DBObject* self, PyObject* args)
+@@ -2863,7 +2908,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DB_get_h_nelem(DBObject* self)
+ {
+@@ -2878,7 +2922,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(nelem);
+ }
+-#endif
+
+ static PyObject*
+ DB_set_lorder(DBObject* self, PyObject* args)
+@@ -2896,7 +2939,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DB_get_lorder(DBObject* self)
+ {
+@@ -2911,7 +2953,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(lorder);
+ }
+-#endif
+
+ static PyObject*
+ DB_set_pagesize(DBObject* self, PyObject* args)
+@@ -2929,7 +2970,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DB_get_pagesize(DBObject* self)
+ {
+@@ -2944,7 +2984,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(pagesize);
+ }
+-#endif
+
+ static PyObject*
+ DB_set_re_delim(DBObject* self, PyObject* args)
+@@ -2967,7 +3006,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DB_get_re_delim(DBObject* self)
+ {
+@@ -2981,7 +3019,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(re_delim);
+ }
+-#endif
+
+ static PyObject*
+ DB_set_re_len(DBObject* self, PyObject* args)
+@@ -2999,7 +3036,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DB_get_re_len(DBObject* self)
+ {
+@@ -3014,7 +3050,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(re_len);
+ }
+-#endif
+
+ static PyObject*
+ DB_set_re_pad(DBObject* self, PyObject* args)
+@@ -3036,7 +3071,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DB_get_re_pad(DBObject* self)
+ {
+@@ -3050,7 +3084,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(re_pad);
+ }
+-#endif
+
+ static PyObject*
+ DB_set_re_source(DBObject* self, PyObject* args)
+@@ -3069,7 +3102,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DB_get_re_source(DBObject* self)
+ {
+@@ -3084,7 +3116,6 @@
+ RETURN_IF_ERR();
+ return PyBytes_FromString(source);
+ }
+-#endif
+
+ static PyObject*
+ DB_stat(DBObject* self, PyObject* args, PyObject* kwargs)
+@@ -3381,7 +3412,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DB_get_encrypt_flags(DBObject* self)
+ {
+@@ -3396,7 +3426,6 @@
+
+ return NUMBER_FromLong(flags);
+ }
+-#endif
+
+
+
+@@ -4987,7 +5016,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_encrypt_flags(DBEnvObject* self)
+ {
+@@ -5025,7 +5053,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(timeout);
+ }
+-#endif
+
+
+ static PyObject*
+@@ -5064,7 +5091,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_shm_key(DBEnvObject* self)
+ {
+@@ -5081,7 +5107,6 @@
+
+ return NUMBER_FromLong(shm_key);
+ }
+-#endif
+
+ #if (DBVER >= 46)
+ static PyObject*
+@@ -5170,7 +5195,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_cachesize(DBEnvObject* self)
+ {
+@@ -5188,7 +5212,6 @@
+
+ return Py_BuildValue("(iii)", gbytes, bytes, ncache);
+ }
+-#endif
+
+
+ static PyObject*
+@@ -5208,7 +5231,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_flags(DBEnvObject* self)
+ {
+@@ -5223,7 +5245,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(flags);
+ }
+-#endif
+
+ #if (DBVER >= 47)
+ static PyObject*
+@@ -5423,7 +5444,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_data_dirs(DBEnvObject* self)
+ {
+@@ -5463,7 +5483,6 @@
+ }
+ return tuple;
+ }
+-#endif
+
+ #if (DBVER >= 44)
+ static PyObject*
+@@ -5513,7 +5532,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_lg_bsize(DBEnvObject* self)
+ {
+@@ -5528,7 +5546,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(lg_bsize);
+ }
+-#endif
+
+ static PyObject*
+ DBEnv_set_lg_dir(DBEnvObject* self, PyObject* args)
+@@ -5547,7 +5564,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_lg_dir(DBEnvObject* self)
+ {
+@@ -5562,7 +5578,6 @@
+ RETURN_IF_ERR();
+ return PyBytes_FromString(dirp);
+ }
+-#endif
+
+ static PyObject*
+ DBEnv_set_lg_max(DBEnvObject* self, PyObject* args)
+@@ -5580,7 +5595,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_lg_max(DBEnvObject* self)
+ {
+@@ -5595,8 +5609,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(lg_max);
+ }
+-#endif
+-
+
+ static PyObject*
+ DBEnv_set_lg_regionmax(DBEnvObject* self, PyObject* args)
+@@ -5614,7 +5626,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_lg_regionmax(DBEnvObject* self)
+ {
+@@ -5629,7 +5640,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(lg_regionmax);
+ }
+-#endif
+
+ #if (DBVER >= 47)
+ static PyObject*
+@@ -5680,7 +5690,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_lk_detect(DBEnvObject* self)
+ {
+@@ -5695,8 +5704,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(lk_detect);
+ }
+-#endif
+-
+
+ #if (DBVER < 45)
+ static PyObject*
+@@ -5734,7 +5741,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_lk_max_locks(DBEnvObject* self)
+ {
+@@ -5749,7 +5755,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(lk_max);
+ }
+-#endif
+
+ static PyObject*
+ DBEnv_set_lk_max_lockers(DBEnvObject* self, PyObject* args)
+@@ -5767,7 +5772,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_lk_max_lockers(DBEnvObject* self)
+ {
+@@ -5782,7 +5786,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(lk_max);
+ }
+-#endif
+
+ static PyObject*
+ DBEnv_set_lk_max_objects(DBEnvObject* self, PyObject* args)
+@@ -5800,7 +5803,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_lk_max_objects(DBEnvObject* self)
+ {
+@@ -5815,9 +5817,7 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(lk_max);
+ }
+-#endif
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_mp_mmapsize(DBEnvObject* self)
+ {
+@@ -5832,8 +5832,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(mmapsize);
+ }
+-#endif
+-
+
+ static PyObject*
+ DBEnv_set_mp_mmapsize(DBEnvObject* self, PyObject* args)
+@@ -5869,8 +5867,6 @@
+ RETURN_NONE();
+ }
+
+-
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_tmp_dir(DBEnvObject* self)
+ {
+@@ -5887,8 +5883,6 @@
+
+ return PyBytes_FromString(dirpp);
+ }
+-#endif
+-
+
+ static PyObject*
+ DBEnv_txn_recover(DBEnvObject* self)
+@@ -6003,8 +5997,6 @@
+ RETURN_NONE();
+ }
+
+-
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_tx_max(DBEnvObject* self)
+ {
+@@ -6019,8 +6011,6 @@
+ RETURN_IF_ERR();
+ return PyLong_FromUnsignedLong(max);
+ }
+-#endif
+-
+
+ static PyObject*
+ DBEnv_set_tx_max(DBEnvObject* self, PyObject* args)
+@@ -6038,8 +6028,6 @@
+ RETURN_NONE();
+ }
+
+-
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_tx_timestamp(DBEnvObject* self)
+ {
+@@ -6054,7 +6042,6 @@
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(timestamp);
+ }
+-#endif
+
+ static PyObject*
+ DBEnv_set_tx_timestamp(DBEnvObject* self, PyObject* args)
+@@ -6756,6 +6743,55 @@
+ RETURN_NONE();
+ }
+
++#if (DBVER >= 47)
++static PyObject*
++DBEnv_set_intermediate_dir_mode(DBEnvObject* self, PyObject* args)
++{
++ int err;
++ const char *mode;
++
++ if (!PyArg_ParseTuple(args,"s:set_intermediate_dir_mode", &mode))
++ return NULL;
++
++ CHECK_ENV_NOT_CLOSED(self);
++
++ MYDB_BEGIN_ALLOW_THREADS;
++ err = self->db_env->set_intermediate_dir_mode(self->db_env, mode);
++ MYDB_END_ALLOW_THREADS;
++ RETURN_IF_ERR();
++ RETURN_NONE();
++}
++
++static PyObject*
++DBEnv_get_intermediate_dir_mode(DBEnvObject* self)
++{
++ int err;
++ const char *mode;
++
++ CHECK_ENV_NOT_CLOSED(self);
++
++ MYDB_BEGIN_ALLOW_THREADS;
++ err = self->db_env->get_intermediate_dir_mode(self->db_env, &mode);
++ MYDB_END_ALLOW_THREADS;
++ RETURN_IF_ERR();
++ return Py_BuildValue("s", mode);
++}
++#endif
++
++static PyObject*
++DBEnv_get_open_flags(DBEnvObject* self)
++{
++ int err;
++ unsigned int flags;
++
++ CHECK_ENV_NOT_CLOSED(self);
++
++ MYDB_BEGIN_ALLOW_THREADS;
++ err = self->db_env->get_open_flags(self->db_env, &flags);
++ MYDB_END_ALLOW_THREADS;
++ RETURN_IF_ERR();
++ return NUMBER_FromLong(flags);
++}
+
+ #if (DBVER < 48)
+ static PyObject*
+@@ -6875,7 +6911,6 @@
+ RETURN_NONE();
+ }
+
+-#if (DBVER >= 42)
+ static PyObject*
+ DBEnv_get_verbose(DBEnvObject* self, PyObject* args)
+ {
+@@ -6893,7 +6928,6 @@
+ RETURN_IF_ERR();
+ return PyBool_FromLong(verbose);
+ }
+-#endif
+
+ #if (DBVER >= 45)
+ static void
+@@ -6975,9 +7009,7 @@
+ PyObject *control_py, *rec_py;
+ DBT control, rec;
+ int envid;
+-#if (DBVER >= 42)
+ DB_LSN lsn;
+-#endif
+
+ if (!PyArg_ParseTuple(args, "OOi:rep_process_message", &control_py,
+ &rec_py, &envid))
+@@ -6994,13 +7026,8 @@
+ err = self->db_env->rep_process_message(self->db_env, &control, &rec,
+ envid, &lsn);
+ #else
+-#if (DBVER >= 42)
+ err = self->db_env->rep_process_message(self->db_env, &control, &rec,
+ &envid, &lsn);
+-#else
+- err = self->db_env->rep_process_message(self->db_env, &control, &rec,
+- &envid);
+-#endif
+ #endif
+ MYDB_END_ALLOW_THREADS;
+ switch (err) {
+@@ -7029,12 +7056,10 @@
+ return r;
+ break;
+ }
+-#if (DBVER >= 42)
+ case DB_REP_NOTPERM :
+ case DB_REP_ISPERM :
+ return Py_BuildValue("(i(ll))", err, lsn.file, lsn.offset);
+ break;
+-#endif
+ }
+ RETURN_IF_ERR();
+ return Py_BuildValue("(OO)", Py_None, Py_None);
+@@ -7086,20 +7111,6 @@
+ return ret;
+ }
+
+-#if (DBVER <= 41)
+-static int
+-_DBEnv_rep_transportCallbackOLD(DB_ENV* db_env, const DBT* control, const DBT* rec,
+- int envid, u_int32_t flags)
+-{
+- DB_LSN lsn;
+-
+- lsn.file = -1; /* Dummy values */
+- lsn.offset = -1;
+- return _DBEnv_rep_transportCallback(db_env, control, rec, &lsn, envid,
+- flags);
+-}
+-#endif
+-
+ static PyObject*
+ DBEnv_rep_set_transport(DBEnvObject* self, PyObject* args)
+ {
+@@ -7120,13 +7131,8 @@
+ err = self->db_env->rep_set_transport(self->db_env, envid,
+ &_DBEnv_rep_transportCallback);
+ #else
+-#if (DBVER >= 42)
+ err = self->db_env->set_rep_transport(self->db_env, envid,
+ &_DBEnv_rep_transportCallback);
+-#else
+- err = self->db_env->set_rep_transport(self->db_env, envid,
+- &_DBEnv_rep_transportCallbackOLD);
+-#endif
+ #endif
+ MYDB_END_ALLOW_THREADS;
+ RETURN_IF_ERR();
+@@ -8482,65 +8488,43 @@
+ {"remove", (PyCFunction)DB_remove, METH_VARARGS|METH_KEYWORDS},
+ {"rename", (PyCFunction)DB_rename, METH_VARARGS},
+ {"set_bt_minkey", (PyCFunction)DB_set_bt_minkey, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_bt_minkey", (PyCFunction)DB_get_bt_minkey, METH_NOARGS},
+-#endif
+ {"set_bt_compare", (PyCFunction)DB_set_bt_compare, METH_O},
+ {"set_cachesize", (PyCFunction)DB_set_cachesize, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_cachesize", (PyCFunction)DB_get_cachesize, METH_NOARGS},
+-#endif
+ {"set_encrypt", (PyCFunction)DB_set_encrypt, METH_VARARGS|METH_KEYWORDS},
+-#if (DBVER >= 42)
+ {"get_encrypt_flags", (PyCFunction)DB_get_encrypt_flags, METH_NOARGS},
+-#endif
+-
+ {"set_flags", (PyCFunction)DB_set_flags, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_flags", (PyCFunction)DB_get_flags, METH_NOARGS},
++#if (DBVER >= 43)
++ {"get_transactional", (PyCFunction)DB_get_transactional, METH_NOARGS},
+ #endif
+ {"set_h_ffactor", (PyCFunction)DB_set_h_ffactor, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_h_ffactor", (PyCFunction)DB_get_h_ffactor, METH_NOARGS},
+-#endif
+ {"set_h_nelem", (PyCFunction)DB_set_h_nelem, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_h_nelem", (PyCFunction)DB_get_h_nelem, METH_NOARGS},
+-#endif
+ {"set_lorder", (PyCFunction)DB_set_lorder, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_lorder", (PyCFunction)DB_get_lorder, METH_NOARGS},
+-#endif
+ {"set_pagesize", (PyCFunction)DB_set_pagesize, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_pagesize", (PyCFunction)DB_get_pagesize, METH_NOARGS},
+-#endif
+ {"set_re_delim", (PyCFunction)DB_set_re_delim, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_re_delim", (PyCFunction)DB_get_re_delim, METH_NOARGS},
+-#endif
+ {"set_re_len", (PyCFunction)DB_set_re_len, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_re_len", (PyCFunction)DB_get_re_len, METH_NOARGS},
+-#endif
+ {"set_re_pad", (PyCFunction)DB_set_re_pad, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_re_pad", (PyCFunction)DB_get_re_pad, METH_NOARGS},
+-#endif
+ {"set_re_source", (PyCFunction)DB_set_re_source, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_re_source", (PyCFunction)DB_get_re_source, METH_NOARGS},
+-#endif
+ {"set_q_extentsize",(PyCFunction)DB_set_q_extentsize, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_q_extentsize",(PyCFunction)DB_get_q_extentsize, METH_NOARGS},
+-#endif
+ {"set_private", (PyCFunction)DB_set_private, METH_O},
+ {"get_private", (PyCFunction)DB_get_private, METH_NOARGS},
+ #if (DBVER >= 46)
+ {"set_priority", (PyCFunction)DB_set_priority, METH_VARARGS},
+ {"get_priority", (PyCFunction)DB_get_priority, METH_NOARGS},
+ #endif
++ {"get_dbname", (PyCFunction)DB_get_dbname, METH_NOARGS},
++ {"get_open_flags", (PyCFunction)DB_get_open_flags, METH_NOARGS},
+ {"stat", (PyCFunction)DB_stat, METH_VARARGS|METH_KEYWORDS},
+ #if (DBVER >= 43)
+ {"stat_print", (PyCFunction)DB_stat_print,
+@@ -8639,24 +8623,18 @@
+ {"get_thread_count", (PyCFunction)DBEnv_get_thread_count, METH_NOARGS},
+ #endif
+ {"set_encrypt", (PyCFunction)DBEnv_set_encrypt, METH_VARARGS|METH_KEYWORDS},
+-#if (DBVER >= 42)
+ {"get_encrypt_flags", (PyCFunction)DBEnv_get_encrypt_flags, METH_NOARGS},
+ {"get_timeout", (PyCFunction)DBEnv_get_timeout,
+ METH_VARARGS|METH_KEYWORDS},
+-#endif
+ {"set_timeout", (PyCFunction)DBEnv_set_timeout, METH_VARARGS|METH_KEYWORDS},
+ {"set_shm_key", (PyCFunction)DBEnv_set_shm_key, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_shm_key", (PyCFunction)DBEnv_get_shm_key, METH_NOARGS},
+-#endif
+ #if (DBVER >= 46)
+ {"set_cache_max", (PyCFunction)DBEnv_set_cache_max, METH_VARARGS},
+ {"get_cache_max", (PyCFunction)DBEnv_get_cache_max, METH_NOARGS},
+ #endif
+ {"set_cachesize", (PyCFunction)DBEnv_set_cachesize, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_cachesize", (PyCFunction)DBEnv_get_cachesize, METH_NOARGS},
+-#endif
+ {"memp_trickle", (PyCFunction)DBEnv_memp_trickle, METH_VARARGS},
+ {"memp_sync", (PyCFunction)DBEnv_memp_sync, METH_VARARGS},
+ {"memp_stat", (PyCFunction)DBEnv_memp_stat,
+@@ -8685,33 +8663,21 @@
+ #endif
+ #endif
+ {"set_data_dir", (PyCFunction)DBEnv_set_data_dir, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_data_dirs", (PyCFunction)DBEnv_get_data_dirs, METH_NOARGS},
+-#endif
+-#if (DBVER >= 42)
+ {"get_flags", (PyCFunction)DBEnv_get_flags, METH_NOARGS},
+-#endif
+ {"set_flags", (PyCFunction)DBEnv_set_flags, METH_VARARGS},
+ #if (DBVER >= 47)
+ {"log_set_config", (PyCFunction)DBEnv_log_set_config, METH_VARARGS},
+ {"log_get_config", (PyCFunction)DBEnv_log_get_config, METH_VARARGS},
+ #endif
+ {"set_lg_bsize", (PyCFunction)DBEnv_set_lg_bsize, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_lg_bsize", (PyCFunction)DBEnv_get_lg_bsize, METH_NOARGS},
+-#endif
+ {"set_lg_dir", (PyCFunction)DBEnv_set_lg_dir, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_lg_dir", (PyCFunction)DBEnv_get_lg_dir, METH_NOARGS},
+-#endif
+ {"set_lg_max", (PyCFunction)DBEnv_set_lg_max, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_lg_max", (PyCFunction)DBEnv_get_lg_max, METH_NOARGS},
+-#endif
+ {"set_lg_regionmax",(PyCFunction)DBEnv_set_lg_regionmax, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_lg_regionmax",(PyCFunction)DBEnv_get_lg_regionmax, METH_NOARGS},
+-#endif
+ #if (DBVER >= 44)
+ {"set_lg_filemode", (PyCFunction)DBEnv_set_lg_filemode, METH_VARARGS},
+ {"get_lg_filemode", (PyCFunction)DBEnv_get_lg_filemode, METH_NOARGS},
+@@ -8721,36 +8687,24 @@
+ {"get_lk_partitions", (PyCFunction)DBEnv_get_lk_partitions, METH_NOARGS},
+ #endif
+ {"set_lk_detect", (PyCFunction)DBEnv_set_lk_detect, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_lk_detect", (PyCFunction)DBEnv_get_lk_detect, METH_NOARGS},
+-#endif
+ #if (DBVER < 45)
+ {"set_lk_max", (PyCFunction)DBEnv_set_lk_max, METH_VARARGS},
+ #endif
+ {"set_lk_max_locks", (PyCFunction)DBEnv_set_lk_max_locks, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_lk_max_locks", (PyCFunction)DBEnv_get_lk_max_locks, METH_NOARGS},
+-#endif
+ {"set_lk_max_lockers", (PyCFunction)DBEnv_set_lk_max_lockers, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_lk_max_lockers", (PyCFunction)DBEnv_get_lk_max_lockers, METH_NOARGS},
+-#endif
+ {"set_lk_max_objects", (PyCFunction)DBEnv_set_lk_max_objects, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_lk_max_objects", (PyCFunction)DBEnv_get_lk_max_objects, METH_NOARGS},
+-#endif
+ #if (DBVER >= 43)
+ {"stat_print", (PyCFunction)DBEnv_stat_print,
+ METH_VARARGS|METH_KEYWORDS},
+ #endif
+ {"set_mp_mmapsize", (PyCFunction)DBEnv_set_mp_mmapsize, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_mp_mmapsize", (PyCFunction)DBEnv_get_mp_mmapsize, METH_NOARGS},
+-#endif
+ {"set_tmp_dir", (PyCFunction)DBEnv_set_tmp_dir, METH_VARARGS},
+-#if (DBVER >= 42)
+ {"get_tmp_dir", (PyCFunction)DBEnv_get_tmp_dir, METH_NOARGS},
+-#endif
+ {"txn_begin", (PyCFunction)DBEnv_txn_begin, METH_VARARGS|METH_KEYWORDS},
+ {"txn_checkpoint", (PyCFunction)DBEnv_txn_checkpoint, METH_VARARGS},
+ {"txn_stat", (PyCFunction)DBEnv_txn_stat, METH_VARARGS},
+@@ -8758,10 +8712,8 @@
+ {"txn_stat_print", (PyCFunction)DBEnv_txn_stat_print,
+ METH_VARARGS|METH_KEYWORDS},
+ #endif
+-#if (DBVER >= 42)
+ {"get_tx_max", (PyCFunction)DBEnv_get_tx_max, METH_NOARGS},
+ {"get_tx_timestamp", (PyCFunction)DBEnv_get_tx_timestamp, METH_NOARGS},
+-#endif
+ {"set_tx_max", (PyCFunction)DBEnv_set_tx_max, METH_VARARGS},
+ {"set_tx_timestamp", (PyCFunction)DBEnv_set_tx_timestamp, METH_VARARGS},
+ {"lock_detect", (PyCFunction)DBEnv_lock_detect, METH_VARARGS},
+@@ -8804,11 +8756,16 @@
+ {"get_mp_max_write", (PyCFunction)DBEnv_get_mp_max_write, METH_NOARGS},
+ #endif
+ {"set_verbose", (PyCFunction)DBEnv_set_verbose, METH_VARARGS},
+-#if (DBVER >= 42)
+- {"get_verbose", (PyCFunction)DBEnv_get_verbose, METH_VARARGS},
++ {"get_verbose", (PyCFunction)DBEnv_get_verbose, METH_VARARGS},
++ {"set_private", (PyCFunction)DBEnv_set_private, METH_O},
++ {"get_private", (PyCFunction)DBEnv_get_private, METH_NOARGS},
++ {"get_open_flags", (PyCFunction)DBEnv_get_open_flags, METH_NOARGS},
++#if (DBVER >= 47)
++ {"set_intermediate_dir_mode", (PyCFunction)DBEnv_set_intermediate_dir_mode,
++ METH_VARARGS},
++ {"get_intermediate_dir_mode", (PyCFunction)DBEnv_get_intermediate_dir_mode,
++ METH_NOARGS},
+ #endif
+- {"set_private", (PyCFunction)DBEnv_set_private, METH_O},
+- {"get_private", (PyCFunction)DBEnv_get_private, METH_NOARGS},
+ {"rep_start", (PyCFunction)DBEnv_rep_start,
+ METH_VARARGS|METH_KEYWORDS},
+ {"rep_set_transport", (PyCFunction)DBEnv_rep_set_transport, METH_VARARGS},
+@@ -8922,13 +8879,9 @@
+
+ CHECK_ENV_NOT_CLOSED(self);
+
+-#if (DBVER >= 42)
+ MYDB_BEGIN_ALLOW_THREADS;
+ self->db_env->get_home(self->db_env, &home);
+ MYDB_END_ALLOW_THREADS;
+-#else
+- home=self->db_env->db_home;
+-#endif
+
+ if (home == NULL) {
+ RETURN_NONE();
+@@ -9298,10 +9251,25 @@
+ {
+ int major, minor, patch;
+
++ /* This should be instantaneous, no need to release the GIL */
+ db_version(&major, &minor, &patch);
+ return Py_BuildValue("(iii)", major, minor, patch);
+ }
+
++#if (DBVER >= 50)
++static PyObject*
++bsddb_version_full(PyObject* self)
++{
++ char *version_string;
++ int family, release, major, minor, patch;
++
++ /* This should be instantaneous, no need to release the GIL */
++ version_string = db_full_version(&family, &release, &major, &minor, &patch);
++ return Py_BuildValue("(siiiii)",
++ version_string, family, release, major, minor, patch);
++}
++#endif
++
+
+ /* List of functions defined in the module */
+ static PyMethodDef bsddb_methods[] = {
+@@ -9311,6 +9279,9 @@
+ {"DBSequence", (PyCFunction)DBSequence_construct, METH_VARARGS | METH_KEYWORDS },
+ #endif
+ {"version", (PyCFunction)bsddb_version, METH_NOARGS, bsddb_version_doc},
++#if (DBVER >= 50)
++ {"full_version", (PyCFunction)bsddb_version_full, METH_NOARGS},
++#endif
+ {NULL, NULL} /* sentinel */
+ };
+
+@@ -9328,6 +9299,11 @@
+ */
+ #define ADD_INT(dict, NAME) _addIntToDict(dict, #NAME, NAME)
+
++/*
++** We can rename the module at import time, so the string allocated
++** must be big enough, and any use of the name must use this particular
++** string.
++*/
+ #define MODULE_NAME_MAX_LEN 11
+ static char _bsddbModuleName[MODULE_NAME_MAX_LEN+1] = "_bsddb";
+
+@@ -9428,13 +9404,7 @@
+ ADD_INT(d, DB_MAX_RECORDS);
+
+ #if (DBVER < 48)
+-#if (DBVER >= 42)
+ ADD_INT(d, DB_RPCCLIENT);
+-#else
+- ADD_INT(d, DB_CLIENT);
+- /* allow apps to be written using DB_RPCCLIENT on older Berkeley DB */
+- _addIntToDict(d, "DB_RPCCLIENT", DB_CLIENT);
+-#endif
+ #endif
+
+ #if (DBVER < 48)
+@@ -9477,6 +9447,14 @@
+ ADD_INT(d, DB_TXN_SYNC);
+ ADD_INT(d, DB_TXN_NOWAIT);
+
++#if (DBVER >= 51)
++ ADD_INT(d, DB_TXN_BULK);
++#endif
++
++#if (DBVER >= 48)
++ ADD_INT(d, DB_CURSOR_BULK);
++#endif
++
+ #if (DBVER >= 46)
+ ADD_INT(d, DB_TXN_WAIT);
+ #endif
+@@ -9561,9 +9539,7 @@
+ ADD_INT(d, DB_ARCH_ABS);
+ ADD_INT(d, DB_ARCH_DATA);
+ ADD_INT(d, DB_ARCH_LOG);
+-#if (DBVER >= 42)
+ ADD_INT(d, DB_ARCH_REMOVE);
+-#endif
+
+ ADD_INT(d, DB_BTREE);
+ ADD_INT(d, DB_HASH);
+@@ -9591,9 +9567,6 @@
+ ADD_INT(d, DB_CACHED_COUNTS);
+ #endif
+
+-#if (DBVER <= 41)
+- ADD_INT(d, DB_COMMIT);
+-#endif
+ ADD_INT(d, DB_CONSUME);
+ ADD_INT(d, DB_CONSUME_WAIT);
+ ADD_INT(d, DB_CURRENT);
+@@ -9671,6 +9644,10 @@
+ #if (DBVER >= 43)
+ ADD_INT(d, DB_STAT_SUBSYSTEM);
+ ADD_INT(d, DB_STAT_MEMP_HASH);
++ ADD_INT(d, DB_STAT_LOCK_CONF);
++ ADD_INT(d, DB_STAT_LOCK_LOCKERS);
++ ADD_INT(d, DB_STAT_LOCK_OBJECTS);
++ ADD_INT(d, DB_STAT_LOCK_PARAMS);
+ #endif
+
+ #if (DBVER >= 48)
+@@ -9690,7 +9667,6 @@
+ ADD_INT(d, DB_EID_INVALID);
+ ADD_INT(d, DB_EID_BROADCAST);
+
+-#if (DBVER >= 42)
+ ADD_INT(d, DB_TIME_NOTGRANTED);
+ ADD_INT(d, DB_TXN_NOT_DURABLE);
+ ADD_INT(d, DB_TXN_WRITE_NOSYNC);
+@@ -9698,9 +9674,8 @@
+ ADD_INT(d, DB_INIT_REP);
+ ADD_INT(d, DB_ENCRYPT);
+ ADD_INT(d, DB_CHKSUM);
+-#endif
+
+-#if (DBVER >= 42) && (DBVER < 47)
++#if (DBVER < 47)
+ ADD_INT(d, DB_LOG_AUTOREMOVE);
+ ADD_INT(d, DB_DIRECT_LOG);
+ #endif
+@@ -9733,6 +9708,20 @@
+ ADD_INT(d, DB_VERB_REPLICATION);
+ ADD_INT(d, DB_VERB_WAITSFOR);
+
++#if (DBVER >= 50)
++ ADD_INT(d, DB_VERB_REP_SYSTEM);
++#endif
++
++#if (DBVER >= 47)
++ ADD_INT(d, DB_VERB_REP_ELECT);
++ ADD_INT(d, DB_VERB_REP_LEASE);
++ ADD_INT(d, DB_VERB_REP_MISC);
++ ADD_INT(d, DB_VERB_REP_MSGS);
++ ADD_INT(d, DB_VERB_REP_SYNC);
++ ADD_INT(d, DB_VERB_REPMGR_CONNFAIL);
++ ADD_INT(d, DB_VERB_REPMGR_MISC);
++#endif
++
+ #if (DBVER >= 45)
+ ADD_INT(d, DB_EVENT_PANIC);
+ ADD_INT(d, DB_EVENT_REP_CLIENT);
+@@ -9748,16 +9737,25 @@
+ ADD_INT(d, DB_EVENT_WRITE_FAILED);
+ #endif
+
++#if (DBVER >= 50)
++ ADD_INT(d, DB_REPMGR_CONF_ELECTIONS);
++ ADD_INT(d, DB_EVENT_REP_MASTER_FAILURE);
++ ADD_INT(d, DB_EVENT_REP_DUPMASTER);
++ ADD_INT(d, DB_EVENT_REP_ELECTION_FAILED);
++#endif
++#if (DBVER >= 48)
++ ADD_INT(d, DB_EVENT_REG_ALIVE);
++ ADD_INT(d, DB_EVENT_REG_PANIC);
++#endif
++
+ ADD_INT(d, DB_REP_DUPMASTER);
+ ADD_INT(d, DB_REP_HOLDELECTION);
+ #if (DBVER >= 44)
+ ADD_INT(d, DB_REP_IGNORE);
+ ADD_INT(d, DB_REP_JOIN_FAILURE);
+ #endif
+-#if (DBVER >= 42)
+ ADD_INT(d, DB_REP_ISPERM);
+ ADD_INT(d, DB_REP_NOTPERM);
+-#endif
+ ADD_INT(d, DB_REP_NEWSITE);
+
+ ADD_INT(d, DB_REP_MASTER);
+@@ -9766,7 +9764,13 @@
+ ADD_INT(d, DB_REP_PERMANENT);
+
+ #if (DBVER >= 44)
++#if (DBVER >= 50)
++ ADD_INT(d, DB_REP_CONF_AUTOINIT);
++#else
+ ADD_INT(d, DB_REP_CONF_NOAUTOINIT);
++#endif /* 5.0 */
++#endif /* 4.4 */
++#if (DBVER >= 44)
+ ADD_INT(d, DB_REP_CONF_DELAYCLIENT);
+ ADD_INT(d, DB_REP_CONF_BULK);
+ ADD_INT(d, DB_REP_CONF_NOWAIT);
+@@ -9774,9 +9778,7 @@
+ ADD_INT(d, DB_REP_REREQUEST);
+ #endif
+
+-#if (DBVER >= 42)
+ ADD_INT(d, DB_REP_NOBUFFER);
+-#endif
+
+ #if (DBVER >= 46)
+ ADD_INT(d, DB_REP_LEASE_EXPIRED);
+@@ -9819,6 +9821,28 @@
+ ADD_INT(d, DB_STAT_ALL);
+ #endif
+
++#if (DBVER >= 51)
++ ADD_INT(d, DB_REPMGR_ACKS_ALL_AVAILABLE);
++#endif
++
++#if (DBVER >= 48)
++ ADD_INT(d, DB_REP_CONF_INMEM);
++#endif
++
++ ADD_INT(d, DB_TIMEOUT);
++
++#if (DBVER >= 50)
++ ADD_INT(d, DB_FORCESYNC);
++#endif
++
++#if (DBVER >= 48)
++ ADD_INT(d, DB_FAILCHK);
++#endif
++
++#if (DBVER >= 51)
++ ADD_INT(d, DB_HOTBACKUP_IN_PROGRESS);
++#endif
++
+ #if (DBVER >= 43)
+ ADD_INT(d, DB_BUFFER_SMALL);
+ ADD_INT(d, DB_SEQ_DEC);
+@@ -9856,6 +9880,10 @@
+ ADD_INT(d, DB_SET_LOCK_TIMEOUT);
+ ADD_INT(d, DB_SET_TXN_TIMEOUT);
+
++#if (DBVER >= 48)
++ ADD_INT(d, DB_SET_REG_TIMEOUT);
++#endif
++
+ /* The exception name must be correct for pickled exception *
+ * objects to unpickle properly. */
+ #ifdef PYBSDDB_STANDALONE /* different value needed for standalone pybsddb */
+@@ -9927,9 +9955,7 @@
+ MAKE_EX(DBNoSuchFileError);
+ MAKE_EX(DBPermissionsError);
+
+-#if (DBVER >= 42)
+ MAKE_EX(DBRepHandleDeadError);
+-#endif
+ #if (DBVER >= 44)
+ MAKE_EX(DBRepLockoutError);
+ #endif
+@@ -9947,6 +9973,7 @@
+ #undef MAKE_EX
+
+ /* Initialise the C API structure and add it to the module */
++ bsddb_api.api_version = PYBSDDB_API_VERSION;
+ bsddb_api.db_type = &DB_Type;
+ bsddb_api.dbcursor_type = &DBCursor_Type;
+ bsddb_api.dblogcursor_type = &DBLogCursor_Type;
+@@ -9955,19 +9982,25 @@
+ bsddb_api.dblock_type = &DBLock_Type;
+ #if (DBVER >= 43)
+ bsddb_api.dbsequence_type = &DBSequence_Type;
++#else
++ bsddb_api.dbsequence_type = NULL;
+ #endif
+ bsddb_api.makeDBError = makeDBError;
+
+ /*
+- ** Capsules exist from Python 3.1, but I
+- ** don't want to break the API compatibility
+- ** for already published Python versions.
++ ** Capsules exist from Python 2.7 and 3.1.
++ ** We don't support Python 3.0 anymore, so...
++ ** #if (PY_VERSION_HEX < ((PY_MAJOR_VERSION < 3) ? 0x02070000 : 0x03020000))
+ */
+-#if (PY_VERSION_HEX < 0x03020000)
++#if (PY_VERSION_HEX < 0x02070000)
+ py_api = PyCObject_FromVoidPtr((void*)&bsddb_api, NULL);
+ #else
+ {
+- char py_api_name[250];
++ /*
++ ** The data must outlive the call!!. So, the static definition.
++ ** The buffer must be big enough...
++ */
++ static char py_api_name[MODULE_NAME_MAX_LEN+10];
+
+ strcpy(py_api_name, _bsddbModuleName);
+ strcat(py_api_name, ".api");
+Index: b/Modules/bsddb.h
+===================================================================
+--- a/Modules/bsddb.h
++++ b/Modules/bsddb.h
+@@ -109,7 +109,7 @@
+ #error "eek! DBVER can't handle minor versions > 9"
+ #endif
+
+-#define PY_BSDDB_VERSION "4.8.4.2"
++#define PY_BSDDB_VERSION "5.1.2"
+
+ /* Python object definitions */
+
+@@ -236,7 +236,7 @@
+ /* To access the structure from an external module, use code like the
+ following (error checking missed out for clarity):
+
+- // If you are using Python before 3.2:
++ // If you are using Python before 2.7:
+ BSDDB_api* bsddb_api;
+ PyObject* mod;
+ PyObject* cobj;
+@@ -249,7 +249,7 @@
+ Py_DECREF(mod);
+
+
+- // If you are using Python 3.2 or up:
++ // If you are using Python 2.7 or up: (except Python 3.0, unsupported)
+ BSDDB_api* bsddb_api;
+
+ // Use "bsddb3._pybsddb.api" if you're using
+@@ -257,10 +257,14 @@
+ bsddb_api = (void **)PyCapsule_Import("bsddb._bsddb.api", 1);
+
+
++ Check "api_version" number before trying to use the API.
++
+ The structure's members must not be changed.
+ */
+
++#define PYBSDDB_API_VERSION 1
+ typedef struct {
++ unsigned int api_version;
+ /* Type objects */
+ PyTypeObject* db_type;
+ PyTypeObject* dbcursor_type;
+@@ -268,9 +272,7 @@
+ PyTypeObject* dbenv_type;
+ PyTypeObject* dbtxn_type;
+ PyTypeObject* dblock_type;
+-#if (DBVER >= 43)
+- PyTypeObject* dbsequence_type;
+-#endif
++ PyTypeObject* dbsequence_type; /* If DBVER < 43 -> NULL */
+
+ /* Functions */
+ int (*makeDBError)(int err);
+@@ -289,9 +291,9 @@
+ #define DBEnvObject_Check(v) ((v)->ob_type == bsddb_api->dbenv_type)
+ #define DBTxnObject_Check(v) ((v)->ob_type == bsddb_api->dbtxn_type)
+ #define DBLockObject_Check(v) ((v)->ob_type == bsddb_api->dblock_type)
+-#if (DBVER >= 43)
+-#define DBSequenceObject_Check(v) ((v)->ob_type == bsddb_api->dbsequence_type)
+-#endif
++#define DBSequenceObject_Check(v) \
++ ((bsddb_api->dbsequence_type) && \
++ ((v)->ob_type == bsddb_api->dbsequence_type))
+
+ #endif /* COMPILING_BSDDB_C */
+
diff --git a/examples/python2.7/patches/deb-locations.diff b/examples/python2.7/patches/deb-locations.diff
new file mode 100644
index 0000000..6226f3f
--- /dev/null
+++ b/examples/python2.7/patches/deb-locations.diff
@@ -0,0 +1,63 @@
+# DP: Adjust locations of directories to Debian policy
+
+--- a/Demo/tkinter/guido/ManPage.py
++++ b/Demo/tkinter/guido/ManPage.py
+@@ -189,8 +189,9 @@
+ def test():
+ import os
+ import sys
+- # XXX This directory may be different on your system
+- MANDIR = '/usr/local/man/mann'
++ # XXX This directory may be different on your system;
++ # here it is set for Debian GNU/Linux.
++ MANDIR = '/usr/share/man'
+ DEFAULTPAGE = 'Tcl'
+ formatted = 0
+ if sys.argv[1:] and sys.argv[1] == '-f':
+--- a/Demo/tkinter/guido/tkman.py
++++ b/Demo/tkinter/guido/tkman.py
+@@ -9,8 +9,8 @@
+ from Tkinter import *
+ from ManPage import ManPage
+
+-MANNDIRLIST = ['/depot/sundry/man/mann','/usr/local/man/mann']
+-MAN3DIRLIST = ['/depot/sundry/man/man3','/usr/local/man/man3']
++MANNDIRLIST = ['/depot/sundry/man/mann','/usr/share/man/mann']
++MAN3DIRLIST = ['/depot/sundry/man/man3','/usr/share/man/man3']
+
+ foundmanndir = 0
+ for dir in MANNDIRLIST:
+--- a/Misc/python.man
++++ b/Misc/python.man
+@@ -323,7 +323,7 @@
+ These are subject to difference depending on local installation
+ conventions; ${prefix} and ${exec_prefix} are installation-dependent
+ and should be interpreted as for GNU software; they may be the same.
+-The default for both is \fI/usr/local\fP.
++On Debian GNU/{Hurd,Linux} the default for both is \fI/usr\fP.
+ .IP \fI${exec_prefix}/bin/python\fP
+ Recommended location of the interpreter.
+ .PP
+--- a/Tools/faqwiz/faqconf.py
++++ b/Tools/faqwiz/faqconf.py
+@@ -21,7 +21,7 @@
+ OWNEREMAIL = "nobody@anywhere.org" # Email for feedback
+ HOMEURL = "http://www.python.org" # Related home page
+ HOMENAME = "Python home" # Name of related home page
+-RCSBINDIR = "/usr/local/bin/" # Directory containing RCS commands
++RCSBINDIR = "/usr/bin/" # Directory containing RCS commands
+ # (must end in a slash)
+
+ # Parameters you can normally leave alone
+--- a/Tools/webchecker/webchecker.py
++++ b/Tools/webchecker/webchecker.py
+@@ -19,7 +19,8 @@
+ a directory listing is returned. Now, you can point webchecker to the
+ document tree in the local file system of your HTTP daemon, and have
+ most of it checked. In fact the default works this way if your local
+-web tree is located at /usr/local/etc/httpd/htdpcs (the default for
++web tree is located at /var/www, which is the default for Debian
++GNU/Linux. Other systems use /usr/local/etc/httpd/htdocs (the default for
+ the NCSA HTTP daemon and probably others).
+
+ Report printed:
diff --git a/examples/python2.7/patches/deb-setup.diff b/examples/python2.7/patches/deb-setup.diff
new file mode 100644
index 0000000..472fbc7
--- /dev/null
+++ b/examples/python2.7/patches/deb-setup.diff
@@ -0,0 +1,17 @@
+# DP: Don't include /usr/local/include and /usr/local/lib as gcc search paths
+
+--- a/setup.py
++++ b/setup.py
+@@ -368,9 +368,9 @@
+ os.unlink(tmpfile)
+
+ def detect_modules(self):
+- # Ensure that /usr/local is always used
+- add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
+- add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
++ # On Debian /usr/local is always used, so we don't include it twice
++ #add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
++ #add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
+ self.add_multiarch_paths()
+
+ # Add paths specified in the environment variables LDFLAGS and
diff --git a/examples/python2.7/patches/debug-build.diff b/examples/python2.7/patches/debug-build.diff
new file mode 100644
index 0000000..25d6e29
--- /dev/null
+++ b/examples/python2.7/patches/debug-build.diff
@@ -0,0 +1,228 @@
+# DP: Change the interpreter to build and install python extensions
+# DP: built with the python-dbg interpreter with a different name into
+# DP: the same path (by appending `_d' to the extension name).
+
+--- a/Lib/distutils/command/build.py
++++ b/Lib/distutils/command/build.py
+@@ -91,7 +91,8 @@
+ # 'lib.<plat>' under the base build directory. We only use one of
+ # them for a given distribution, though --
+ if self.build_purelib is None:
+- self.build_purelib = os.path.join(self.build_base, 'lib')
++ self.build_purelib = os.path.join(self.build_base,
++ 'lib' + plat_specifier)
+ if self.build_platlib is None:
+ self.build_platlib = os.path.join(self.build_base,
+ 'lib' + plat_specifier)
+--- a/Lib/distutils/sysconfig.py
++++ b/Lib/distutils/sysconfig.py
+@@ -85,7 +85,7 @@
+ # Include is located in the srcdir
+ inc_dir = os.path.join(srcdir, "Include")
+ return inc_dir
+- return os.path.join(prefix, "include", "python" + get_python_version())
++ return os.path.join(prefix, "include", "python" + get_python_version())+(sys.pydebug and "_d" or "")
+ elif os.name == "nt":
+ return os.path.join(prefix, "include")
+ elif os.name == "os2":
+@@ -216,7 +216,7 @@
+ if python_build:
+ return os.path.join(os.path.dirname(sys.executable), "Makefile")
+ lib_dir = get_python_lib(plat_specific=1, standard_lib=1)
+- return os.path.join(lib_dir, "config", "Makefile")
++ return os.path.join(lib_dir, "config"+(sys.pydebug and "_d" or ""), "Makefile")
+
+
+ def parse_config_h(fp, g=None):
+--- a/Lib/sysconfig.py
++++ b/Lib/sysconfig.py
+@@ -299,7 +299,7 @@
+ def _get_makefile_filename():
+ if _PYTHON_BUILD:
+ return os.path.join(_PROJECT_BASE, "Makefile")
+- return os.path.join(get_path('platstdlib').replace("/usr/local","/usr",1), "config", "Makefile")
++ return os.path.join(get_path('platstdlib').replace("/usr/local","/usr",1), "config" + (sys.pydebug and "_d" or ""), "Makefile")
+
+
+ def _init_posix(vars):
+@@ -384,7 +384,7 @@
+ else:
+ inc_dir = _PROJECT_BASE
+ else:
+- inc_dir = get_path('platinclude').replace("/usr/local","/usr",1)
++ inc_dir = get_path('platinclude').replace("/usr/local","/usr",1)+(sys.pydebug and "_d" or "")
+ return os.path.join(inc_dir, 'pyconfig.h')
+
+ def get_scheme_names():
+--- a/Makefile.pre.in
++++ b/Makefile.pre.in
+@@ -108,8 +108,8 @@
+ # Detailed destination directories
+ BINLIBDEST= $(LIBDIR)/python$(VERSION)
+ LIBDEST= $(SCRIPTDIR)/python$(VERSION)
+-INCLUDEPY= $(INCLUDEDIR)/python$(VERSION)
+-CONFINCLUDEPY= $(CONFINCLUDEDIR)/python$(VERSION)
++INCLUDEPY= $(INCLUDEDIR)/python$(VERSION)$(DEBUG_EXT)
++CONFINCLUDEPY= $(CONFINCLUDEDIR)/python$(VERSION)$(DEBUG_EXT)
+ LIBP= $(LIBDIR)/python$(VERSION)
+
+ # Symbols used for using shared libraries
+@@ -123,6 +123,8 @@
+ EXE= @EXEEXT@
+ BUILDEXE= @BUILDEXEEXT@
+
++DEBUG_EXT= @DEBUG_EXT@
++
+ # Short name and location for Mac OS X Python framework
+ UNIVERSALSDK=@UNIVERSALSDK@
+ PYTHONFRAMEWORK= @PYTHONFRAMEWORK@
+@@ -429,7 +431,7 @@
+ $(AR) $(ARFLAGS) $@ $(MODOBJS)
+ $(RANLIB) $@
+
+-libpython$(VERSION).so: $(LIBRARY_OBJS)
++libpython$(VERSION)$(DEBUG_EXT).so: $(LIBRARY_OBJS)
+ if test $(INSTSONAME) != $(LDLIBRARY); then \
+ $(BLDSHARED) $(PY_LDFLAGS) -Wl,-h$(INSTSONAME) -o $(INSTSONAME) $(LIBRARY_OBJS) $(MODLIBS) $(SHLIBS) $(LIBC) $(LIBM) $(LDLAST); \
+ $(LN) -f $(INSTSONAME) $@; \
+@@ -990,8 +992,8 @@
+ $(INSTALL_DATA) pyconfig.h $(DESTDIR)$(CONFINCLUDEPY)/pyconfig.h
+
+ # Install the library and miscellaneous stuff needed for extending/embedding
+-# This goes into $(exec_prefix)
+-LIBPL= $(LIBP)/config
++# This goes into $(exec_prefix)$(DEBUG_EXT)
++LIBPL= $(LIBP)/config$(DEBUG_EXT)
+
+ # pkgconfig directory
+ LIBPC= $(LIBDIR)/pkgconfig
+--- a/Misc/python-config.in
++++ b/Misc/python-config.in
+@@ -45,7 +45,7 @@
+
+ elif opt in ('--libs', '--ldflags'):
+ libs = getvar('LIBS').split() + getvar('SYSLIBS').split()
+- libs.append('-lpython'+pyver)
++ libs.append('-lpython' + pyver + (sys.pydebug and "_d" or ""))
+ # add the prefix/lib/pythonX.Y/config dir, but only if there is no
+ # shared library in prefix/lib/.
+ if opt == '--ldflags':
+--- a/Python/dynload_shlib.c
++++ b/Python/dynload_shlib.c
+@@ -46,6 +46,10 @@
+ {"module.exe", "rb", C_EXTENSION},
+ {"MODULE.EXE", "rb", C_EXTENSION},
+ #else
++#ifdef Py_DEBUG
++ {"_d.so", "rb", C_EXTENSION},
++ {"module_d.so", "rb", C_EXTENSION},
++#endif
+ {".so", "rb", C_EXTENSION},
+ {"module.so", "rb", C_EXTENSION},
+ #endif
+--- a/Python/sysmodule.c
++++ b/Python/sysmodule.c
+@@ -1504,6 +1504,12 @@
+ PyString_FromString("legacy"));
+ #endif
+
++#ifdef Py_DEBUG
++ PyDict_SetItemString(sysdict, "pydebug", Py_True);
++#else
++ PyDict_SetItemString(sysdict, "pydebug", Py_False);
++#endif
++
+ #undef SET_SYS_FROM_STRING
+ if (PyErr_Occurred())
+ return NULL;
+--- a/configure.ac
++++ b/configure.ac
+@@ -634,7 +634,7 @@
+ AC_MSG_CHECKING(LIBRARY)
+ if test -z "$LIBRARY"
+ then
+- LIBRARY='libpython$(VERSION).a'
++ LIBRARY='libpython$(VERSION)$(DEBUG_EXT).a'
+ fi
+ AC_MSG_RESULT($LIBRARY)
+
+@@ -779,8 +779,8 @@
+ INSTSONAME="$LDLIBRARY".$SOVERSION
+ ;;
+ Linux*|GNU*|NetBSD*|FreeBSD*|DragonFly*|OpenBSD*)
+- LDLIBRARY='libpython$(VERSION).so'
+- BLDLIBRARY='-L. -lpython$(VERSION)'
++ LDLIBRARY='libpython$(VERSION)$(DEBUG_EXT).so'
++ BLDLIBRARY='-L. -lpython$(VERSION)$(DEBUG_EXT)'
+ RUNSHARED=LD_LIBRARY_PATH=`pwd`:${LD_LIBRARY_PATH}
+ case $ac_sys_system in
+ FreeBSD*)
+@@ -904,6 +904,12 @@
+ fi],
+ [AC_MSG_RESULT(no)])
+
++if test "$Py_DEBUG" = 'true'
++then
++ DEBUG_EXT=_d
++fi
++AC_SUBST(DEBUG_EXT)
++
+ # XXX Shouldn't the code above that fiddles with BASECFLAGS and OPT be
+ # merged with this chunk of code?
+
+@@ -1759,7 +1765,7 @@
+ esac
+ ;;
+ CYGWIN*) SO=.dll;;
+- *) SO=.so;;
++ *) SO=$DEBUG_EXT.so;;
+ esac
+ else
+ # this might also be a termcap variable, see #610332
+--- a/Lib/distutils/tests/test_build_ext.py
++++ b/Lib/distutils/tests/test_build_ext.py
+@@ -287,8 +287,8 @@
+ finally:
+ os.chdir(old_wd)
+ self.assertTrue(os.path.exists(so_file))
+- self.assertEqual(os.path.splitext(so_file)[-1],
+- sysconfig.get_config_var('SO'))
++ so_ext = sysconfig.get_config_var('SO')
++ self.assertEqual(so_file[len(so_file)-len(so_ext):], so_ext)
+ so_dir = os.path.dirname(so_file)
+ self.assertEqual(so_dir, other_tmp_dir)
+ cmd.compiler = None
+@@ -296,8 +296,7 @@
+ cmd.run()
+ so_file = cmd.get_outputs()[0]
+ self.assertTrue(os.path.exists(so_file))
+- self.assertEqual(os.path.splitext(so_file)[-1],
+- sysconfig.get_config_var('SO'))
++ self.assertEqual(so_file[len(so_file)-len(so_ext):], so_ext)
+ so_dir = os.path.dirname(so_file)
+ self.assertEqual(so_dir, cmd.build_lib)
+
+--- a/Lib/distutils/tests/test_build.py
++++ b/Lib/distutils/tests/test_build.py
+@@ -20,10 +20,6 @@
+ # if not specified, plat_name gets the current platform
+ self.assertEqual(cmd.plat_name, get_platform())
+
+- # build_purelib is build + lib
+- wanted = os.path.join(cmd.build_base, 'lib')
+- self.assertEqual(cmd.build_purelib, wanted)
+-
+ # build_platlib is 'build/lib.platform-x.x[-pydebug]'
+ # examples:
+ # build/lib.macosx-10.3-i386-2.7
+@@ -34,6 +30,10 @@
+ wanted = os.path.join(cmd.build_base, 'lib' + plat_spec)
+ self.assertEqual(cmd.build_platlib, wanted)
+
++ # build_purelib is build + lib
++ wanted = os.path.join(cmd.build_base, 'lib' + plat_spec)
++ self.assertEqual(cmd.build_purelib, wanted)
++
+ # by default, build_lib = build_purelib
+ self.assertEqual(cmd.build_lib, cmd.build_purelib)
+
diff --git a/examples/python2.7/patches/disable-sem-check.diff b/examples/python2.7/patches/disable-sem-check.diff
new file mode 100644
index 0000000..a0d1321
--- /dev/null
+++ b/examples/python2.7/patches/disable-sem-check.diff
@@ -0,0 +1,28 @@
+# DP: Assume working semaphores on Linux; don't rely on the running kernel for the check.
+
+--- a/configure.ac
++++ b/configure.ac
+@@ -3645,6 +3645,11 @@
+ [ac_cv_posix_semaphores_enabled=no],
+ [ac_cv_posix_semaphores_enabled=yes])
+ )
++case $ac_sys_system in
++ Linux*)
++ # assume enabled, see https://launchpad.net/bugs/630511
++ ac_cv_posix_semaphores_enabled=yes
++esac
+ AC_MSG_RESULT($ac_cv_posix_semaphores_enabled)
+ if test $ac_cv_posix_semaphores_enabled = no
+ then
+@@ -3681,6 +3686,11 @@
+ [ac_cv_broken_sem_getvalue=yes],
+ [ac_cv_broken_sem_getvalue=yes])
+ )
++case $ac_sys_system in
++ Linux*)
++ # assume enabled, see https://launchpad.net/bugs/630511
++ ac_cv_broken_sem_getvalue=no
++esac
+ AC_MSG_RESULT($ac_cv_broken_sem_getvalue)
+ if test $ac_cv_broken_sem_getvalue = yes
+ then
diff --git a/examples/python2.7/patches/disable-ssl-cert-tests.diff b/examples/python2.7/patches/disable-ssl-cert-tests.diff
new file mode 100644
index 0000000..af3deab
--- /dev/null
+++ b/examples/python2.7/patches/disable-ssl-cert-tests.diff
@@ -0,0 +1,75 @@
+--- a/Lib/test/test_ssl.py
++++ b/Lib/test/test_ssl.py
+@@ -231,59 +231,6 @@
+ finally:
+ s.close()
+
+- # this should succeed because we specify the root cert
+- s = ssl.wrap_socket(socket.socket(socket.AF_INET),
+- cert_reqs=ssl.CERT_REQUIRED,
+- ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
+- try:
+- s.connect(("svn.python.org", 443))
+- finally:
+- s.close()
+-
+- def test_connect_ex(self):
+- # Issue #11326: check connect_ex() implementation
+- with test_support.transient_internet("svn.python.org"):
+- s = ssl.wrap_socket(socket.socket(socket.AF_INET),
+- cert_reqs=ssl.CERT_REQUIRED,
+- ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
+- try:
+- self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
+- self.assertTrue(s.getpeercert())
+- finally:
+- s.close()
+-
+- def test_non_blocking_connect_ex(self):
+- # Issue #11326: non-blocking connect_ex() should allow handshake
+- # to proceed after the socket gets ready.
+- with test_support.transient_internet("svn.python.org"):
+- s = ssl.wrap_socket(socket.socket(socket.AF_INET),
+- cert_reqs=ssl.CERT_REQUIRED,
+- ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
+- do_handshake_on_connect=False)
+- try:
+- s.setblocking(False)
+- rc = s.connect_ex(('svn.python.org', 443))
+- # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
+- self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
+- # Wait for connect to finish
+- select.select([], [s], [], 5.0)
+- # Non-blocking handshake
+- while True:
+- try:
+- s.do_handshake()
+- break
+- except ssl.SSLError as err:
+- if err.args[0] == ssl.SSL_ERROR_WANT_READ:
+- select.select([s], [], [], 5.0)
+- elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
+- select.select([], [s], [], 5.0)
+- else:
+- raise
+- # SSL established
+- self.assertTrue(s.getpeercert())
+- finally:
+- s.close()
+-
+ @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
+ def test_makefile_close(self):
+ # Issue #5238: creating a file-like object with makefile() shouldn't
+@@ -343,12 +290,6 @@
+ else:
+ self.fail("Got server certificate %s for svn.python.org!" % pem)
+
+- pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
+- if not pem:
+- self.fail("No server certificate on svn.python.org:443!")
+- if test_support.verbose:
+- sys.stdout.write("\nVerified certificate for svn.python.org:443 is\n%s\n" % pem)
+-
+ def test_algorithms(self):
+ # Issue #8484: all algorithms should be available when verifying a
+ # certificate.
diff --git a/examples/python2.7/patches/distutils-install-layout.diff b/examples/python2.7/patches/distutils-install-layout.diff
new file mode 100644
index 0000000..22c5ab6
--- /dev/null
+++ b/examples/python2.7/patches/distutils-install-layout.diff
@@ -0,0 +1,380 @@
+# DP: distutils: Add an option --install-layout=deb, which
+# DP: - installs into $prefix/dist-packages instead of $prefix/site-packages.
+# DP: - doesn't encode the python version into the egg name.
+
+Index: b/Doc/install/index.rst
+===================================================================
+--- a/Doc/install/index.rst
++++ b/Doc/install/index.rst
+@@ -240,6 +240,8 @@
+ +-----------------+-----------------------------------------------------+--------------------------------------------------+-------+
+ | Platform | Standard installation location | Default value | Notes |
+ +=================+=====================================================+==================================================+=======+
++| Debian/Ubuntu | :file:`{prefix}/lib/python{X.Y}/dist-packages` | :file:`/usr/local/lib/python{X.Y}/dist-packages` | \(0) |
+++-----------------+-----------------------------------------------------+--------------------------------------------------+-------+
+ | Unix (pure) | :file:`{prefix}/lib/python{X.Y}/site-packages` | :file:`/usr/local/lib/python{X.Y}/site-packages` | \(1) |
+ +-----------------+-----------------------------------------------------+--------------------------------------------------+-------+
+ | Unix (non-pure) | :file:`{exec-prefix}/lib/python{X.Y}/site-packages` | :file:`/usr/local/lib/python{X.Y}/site-packages` | \(1) |
+@@ -249,6 +251,14 @@
+
+ Notes:
+
++(0)
++ Starting with Python-2.6, Debian/Ubuntu uses a non-default name for the
++ installation directory of the Python that comes with the Linux distribution.
++ This avoids overwriting the python modules which come with the distribution,
++ which unfortunately is the upstream behaviour of the installation tools. The
++ non-default name under :file:`/usr/local` is used so as not to overwrite a
++ local python installation (defaulting to :file:`/usr/local`).
++
+ (1)
+ Most Linux distributions include Python as a standard part of the system, so
+ :file:`{prefix}` and :file:`{exec-prefix}` are usually both :file:`/usr` on
+@@ -433,6 +443,15 @@
+
+ /usr/bin/python setup.py install --prefix=/usr/local
+
++Starting with Python-2.6, Debian/Ubuntu uses
++:file:`/usr/lib/python{X.Y}/dist-packages` and
++:file:`/usr/local/lib/python{X.Y}/dist-packages` for the installation
++of python modules included in the Linux distribution. To override
++the name of the site directory, explicitly use the :option:`--prefix`
++option; however, make sure that the installation path is included in
++``sys.path``. For packaging of python modules for Debian/Ubuntu, use
++the new ``setup.py install`` option :option:`--install-layout=deb`.
++
+ Another possibility is a network filesystem where the name used to write to a
+ remote directory is different from the name used to read it: for example, the
+ Python interpreter accessed as :file:`/usr/local/bin/python` might search for
+@@ -684,6 +703,17 @@
+ import them, this directory must be added to ``sys.path``. There are several
+ different ways to add the directory.
+
++On Debian/Ubuntu, starting with Python-2.6 the convention for system
++installed packages is to put then in the
++:file:`/usr/lib/python{X.Y}/dist-packages/` directory, and for locally
++installed packages is to put them in the
++:file:`/usr/lib/python{X.Y}/dist-packages/` directory. To share the
++locally installed packages for the system provided Python with the
++locally installed packages of a local python installation, make
++:file:`/usr/lib/python{X.Y}/dist-packages/` a symbolic link to the
++:file:`{...}/site-packages/` directory of your local python
++installation.
++
+ The most convenient way is to add a path configuration file to a directory
+ that's already on Python's path, usually to the :file:`.../site-packages/`
+ directory. Path configuration files have an extension of :file:`.pth`, and each
+Index: b/Lib/distutils/command/install.py
+===================================================================
+--- a/Lib/distutils/command/install.py
++++ b/Lib/distutils/command/install.py
+@@ -47,6 +47,20 @@
+ 'scripts': '$base/bin',
+ 'data' : '$base',
+ },
++ 'unix_local': {
++ 'purelib': '$base/local/lib/python$py_version_short/dist-packages',
++ 'platlib': '$platbase/local/lib/python$py_version_short/dist-packages',
++ 'headers': '$base/local/include/python$py_version_short/$dist_name',
++ 'scripts': '$base/local/bin',
++ 'data' : '$base/local',
++ },
++ 'deb_system': {
++ 'purelib': '$base/lib/python$py_version_short/dist-packages',
++ 'platlib': '$platbase/lib/python$py_version_short/dist-packages',
++ 'headers': '$base/include/python$py_version_short/$dist_name',
++ 'scripts': '$base/bin',
++ 'data' : '$base',
++ },
+ 'unix_home': {
+ 'purelib': '$base/lib/python',
+ 'platlib': '$base/lib/python',
+@@ -154,6 +168,9 @@
+
+ ('record=', None,
+ "filename in which to record list of installed files"),
++
++ ('install-layout=', None,
++ "installation layout to choose (known values: deb, unix)"),
+ ]
+
+ boolean_options = ['compile', 'force', 'skip-build', 'user']
+@@ -168,6 +185,7 @@
+ self.exec_prefix = None
+ self.home = None
+ self.user = 0
++ self.prefix_option = None
+
+ # These select only the installation base; it's up to the user to
+ # specify the installation scheme (currently, that means supplying
+@@ -189,6 +207,9 @@
+ self.install_userbase = USER_BASE
+ self.install_usersite = USER_SITE
+
++ # enable custom installation, known values: deb
++ self.install_layout = None
++
+ self.compile = None
+ self.optimize = None
+
+@@ -421,6 +442,7 @@
+ self.install_base = self.install_platbase = self.home
+ self.select_scheme("unix_home")
+ else:
++ self.prefix_option = self.prefix
+ if self.prefix is None:
+ if self.exec_prefix is not None:
+ raise DistutilsOptionError, \
+@@ -435,7 +457,23 @@
+
+ self.install_base = self.prefix
+ self.install_platbase = self.exec_prefix
+- self.select_scheme("unix_prefix")
++ if self.install_layout:
++ if self.install_layout.lower() in ['deb']:
++ self.select_scheme("deb_system")
++ elif self.install_layout.lower() in ['posix', 'unix']:
++ self.select_scheme("unix_prefix")
++ else:
++ raise DistutilsOptionError(
++ "unknown value for --install-layout")
++ elif (self.prefix_option and os.path.normpath(self.prefix) != '/usr/local') \
++ or 'PYTHONUSERBASE' in os.environ \
++ or 'real_prefix' in sys.__dict__:
++ self.select_scheme("unix_prefix")
++ else:
++ if os.path.normpath(self.prefix) == '/usr/local':
++ self.select_scheme("deb_system")
++ else:
++ self.select_scheme("unix_local")
+
+ # finalize_unix ()
+
+Index: b/Lib/distutils/command/install_egg_info.py
+===================================================================
+--- a/Lib/distutils/command/install_egg_info.py
++++ b/Lib/distutils/command/install_egg_info.py
+@@ -14,18 +14,37 @@
+ description = "Install package's PKG-INFO metadata as an .egg-info file"
+ user_options = [
+ ('install-dir=', 'd', "directory to install to"),
++ ('install-layout', None, "custom installation layout"),
+ ]
+
+ def initialize_options(self):
+ self.install_dir = None
++ self.install_layout = None
++ self.prefix_option = None
+
+ def finalize_options(self):
+ self.set_undefined_options('install_lib',('install_dir','install_dir'))
+- basename = "%s-%s-py%s.egg-info" % (
+- to_filename(safe_name(self.distribution.get_name())),
+- to_filename(safe_version(self.distribution.get_version())),
+- sys.version[:3]
+- )
++ self.set_undefined_options('install',('install_layout','install_layout'))
++ self.set_undefined_options('install',('prefix_option','prefix_option'))
++ if self.install_layout:
++ basename = "%s-%s.egg-info" % (
++ to_filename(safe_name(self.distribution.get_name())),
++ to_filename(safe_version(self.distribution.get_version()))
++ )
++ if not self.install_layout.lower() in ['deb']:
++ raise DistutilsOptionError(
++ "unknown value for --install-layout")
++ elif self.prefix_option or 'real_prefix' in sys.__dict__:
++ basename = "%s-%s-py%s.egg-info" % (
++ to_filename(safe_name(self.distribution.get_name())),
++ to_filename(safe_version(self.distribution.get_version())),
++ sys.version[:3]
++ )
++ else:
++ basename = "%s-%s.egg-info" % (
++ to_filename(safe_name(self.distribution.get_name())),
++ to_filename(safe_version(self.distribution.get_version()))
++ )
+ self.target = os.path.join(self.install_dir, basename)
+ self.outputs = [self.target]
+
+Index: b/Lib/distutils/sysconfig.py
+===================================================================
+--- a/Lib/distutils/sysconfig.py
++++ b/Lib/distutils/sysconfig.py
+@@ -110,6 +110,7 @@
+ If 'prefix' is supplied, use it instead of sys.prefix or
+ sys.exec_prefix -- i.e., ignore 'plat_specific'.
+ """
++ is_default_prefix = not prefix or os.path.normpath(prefix) in ('/usr', '/usr/local')
+ if prefix is None:
+ prefix = plat_specific and EXEC_PREFIX or PREFIX
+
+@@ -118,6 +119,8 @@
+ "lib", "python" + get_python_version())
+ if standard_lib:
+ return libpython
++ elif is_default_prefix and 'PYTHONUSERBASE' not in os.environ and 'real_prefix' not in sys.__dict__:
++ return os.path.join(libpython, "dist-packages")
+ else:
+ return os.path.join(libpython, "site-packages")
+
+Index: b/Lib/site.py
+===================================================================
+--- a/Lib/site.py
++++ b/Lib/site.py
+@@ -285,6 +285,13 @@
+
+ if ENABLE_USER_SITE and os.path.isdir(user_site):
+ addsitedir(user_site, known_paths)
++ if ENABLE_USER_SITE:
++ for dist_libdir in ("local/lib", "lib"):
++ user_site = os.path.join(USER_BASE, dist_libdir,
++ "python" + sys.version[:3],
++ "dist-packages")
++ if os.path.isdir(user_site):
++ addsitedir(user_site, known_paths)
+ return known_paths
+
+ def getsitepackages():
+Index: b/Lib/sysconfig.py
+===================================================================
+--- a/Lib/sysconfig.py
++++ b/Lib/sysconfig.py
+@@ -16,6 +16,26 @@
+ 'scripts': '{base}/bin',
+ 'data': '{base}',
+ },
++ 'posix_local': {
++ 'stdlib': '{base}/lib/python{py_version_short}',
++ 'platstdlib': '{platbase}/lib/python{py_version_short}',
++ 'purelib': '{base}/local/lib/python{py_version_short}/dist-packages',
++ 'platlib': '{platbase}/local/lib/python{py_version_short}/dist-packages',
++ 'include': '{base}/local/include/python{py_version_short}',
++ 'platinclude': '{platbase}/local/include/python{py_version_short}',
++ 'scripts': '{base}/local/bin',
++ 'data': '{base}/local',
++ },
++ 'deb_system': {
++ 'stdlib': '{base}/lib/python{py_version_short}',
++ 'platstdlib': '{platbase}/lib/python{py_version_short}',
++ 'purelib': '{base}/lib/python{py_version_short}/dist-packages',
++ 'platlib': '{platbase}/lib/python{py_version_short}/dist-packages',
++ 'include': '{base}/include/python{py_version_short}',
++ 'platinclude': '{platbase}/include/python{py_version_short}',
++ 'scripts': '{base}/bin',
++ 'data': '{base}',
++ },
+ 'posix_home': {
+ 'stdlib': '{base}/lib/python',
+ 'platstdlib': '{base}/lib/python',
+@@ -125,7 +145,7 @@
+ _PYTHON_BUILD = is_python_build()
+
+ if _PYTHON_BUILD:
+- for scheme in ('posix_prefix', 'posix_home'):
++ for scheme in ('posix_prefix', 'posix_local', 'deb_system', 'posix_home'):
+ _INSTALL_SCHEMES[scheme]['include'] = '{projectbase}/Include'
+ _INSTALL_SCHEMES[scheme]['platinclude'] = '{srcdir}'
+
+@@ -159,8 +179,11 @@
+
+ def _get_default_scheme():
+ if os.name == 'posix':
+- # the default scheme for posix is posix_prefix
+- return 'posix_prefix'
++ # the default scheme for posix on Debian/Ubuntu is posix_local
++ # FIXME: return dist-packages/posix_prefix only for
++ # is_default_prefix and 'PYTHONUSERBASE' not in os.environ and 'real_prefix' not in sys.__dict__
++ # is_default_prefix = not prefix or os.path.normpath(prefix) in ('/usr', '/usr/local')
++ return 'posix_local'
+ return os.name
+
+ def _getuserbase():
+@@ -305,7 +328,7 @@
+ def _get_makefile_filename():
+ if _PYTHON_BUILD:
+ return os.path.join(_PROJECT_BASE, "Makefile")
+- return os.path.join(get_path('platstdlib'), "config", "Makefile")
++ return os.path.join(get_path('platstdlib').replace("/usr/local","/usr",1), "config", "Makefile")
+
+
+ def _init_posix(vars):
+@@ -390,7 +413,7 @@
+ else:
+ inc_dir = _PROJECT_BASE
+ else:
+- inc_dir = get_path('platinclude')
++ inc_dir = get_path('platinclude').replace("/usr/local","/usr",1)
+ return os.path.join(inc_dir, 'pyconfig.h')
+
+ def get_scheme_names():
+Index: b/Lib/test/test_site.py
+===================================================================
+--- a/Lib/test/test_site.py
++++ b/Lib/test/test_site.py
+@@ -242,10 +242,13 @@
+ elif os.sep == '/':
+ # OS X non-framwework builds, Linux, FreeBSD, etc
+ self.assertEqual(len(dirs), 2)
+- wanted = os.path.join('xoxo', 'lib', 'python' + sys.version[:3],
+- 'site-packages')
++ wanted = os.path.join('xoxo', 'local', 'lib',
++ 'python' + sys.version[:3],
++ 'dist-packages')
+ self.assertEqual(dirs[0], wanted)
+- wanted = os.path.join('xoxo', 'lib', 'site-python')
++ wanted = os.path.join('xoxo', 'lib',
++ 'python' + sys.version[:3],
++ 'dist-packages')
+ self.assertEqual(dirs[1], wanted)
+ else:
+ # other platforms
+Index: b/Lib/test/test_sysconfig.py
+===================================================================
+--- a/Lib/test/test_sysconfig.py
++++ b/Lib/test/test_sysconfig.py
+@@ -230,8 +230,8 @@
+ self.assertTrue(os.path.isfile(config_h), config_h)
+
+ def test_get_scheme_names(self):
+- wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user',
+- 'posix_home', 'posix_prefix', 'posix_user')
++ wanted = ('deb_system', 'nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user',
++ 'posix_home', 'posix_local', 'posix_prefix', 'posix_user')
+ self.assertEqual(get_scheme_names(), wanted)
+
+ def test_symlink(self):
+Index: b/Lib/distutils/tests/test_install.py
+===================================================================
+--- a/Lib/distutils/tests/test_install.py
++++ b/Lib/distutils/tests/test_install.py
+@@ -195,7 +195,7 @@
+
+ found = [os.path.basename(line) for line in content.splitlines()]
+ expected = ['hello.py', 'hello.pyc', 'sayhi',
+- 'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]]
++ 'UNKNOWN-0.0.0.egg-info']
+ self.assertEqual(found, expected)
+
+ def test_record_extensions(self):
+@@ -225,7 +225,7 @@
+
+ found = [os.path.basename(line) for line in content.splitlines()]
+ expected = [_make_ext_name('xx'),
+- 'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]]
++ 'UNKNOWN-0.0.0.egg-info']
+ self.assertEqual(found, expected)
+
+ def test_debug_mode(self):
+Index: b/Lib/pydoc.py
+===================================================================
+--- a/Lib/pydoc.py
++++ b/Lib/pydoc.py
+@@ -359,6 +359,7 @@
+ 'marshal', 'posix', 'signal', 'sys',
+ 'thread', 'zipimport') or
+ (file.startswith(basedir) and
++ not file.startswith(os.path.join(basedir, 'dist-packages')) and
+ not file.startswith(os.path.join(basedir, 'site-packages')))) and
+ object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
+ if docloc.startswith("http://"):
diff --git a/examples/python2.7/patches/distutils-link.diff b/examples/python2.7/patches/distutils-link.diff
new file mode 100644
index 0000000..62d4fd0
--- /dev/null
+++ b/examples/python2.7/patches/distutils-link.diff
@@ -0,0 +1,18 @@
+# DP: Don't add standard library dirs to library_dirs and runtime_library_dirs.
+
+--- a/Lib/distutils/unixccompiler.py
++++ b/Lib/distutils/unixccompiler.py
+@@ -213,7 +213,12 @@
+ objects, output_dir = self._fix_object_args(objects, output_dir)
+ libraries, library_dirs, runtime_library_dirs = \
+ self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
+-
++ # filter out standard library paths, which are not explicitly needed
++ # for linking
++ library_dirs = [dir for dir in library_dirs
++ if not dir in ('/lib', '/lib64', '/usr/lib', '/usr/lib64')]
++ runtime_library_dirs = [dir for dir in runtime_library_dirs
++ if not dir in ('/lib', '/lib64', '/usr/lib', '/usr/lib64')]
+ lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
+ libraries)
+ if type(output_dir) not in (StringType, NoneType):
diff --git a/examples/python2.7/patches/distutils-sysconfig.diff b/examples/python2.7/patches/distutils-sysconfig.diff
new file mode 100644
index 0000000..ec1a84c
--- /dev/null
+++ b/examples/python2.7/patches/distutils-sysconfig.diff
@@ -0,0 +1,33 @@
+# DP: Allow setting BASECFLAGS, OPT and EXTRA_LDFLAGS (like CC, CXX, CPP,
+# DP: CFLAGS, CPPFLAGS, CCSHARED, LDSHARED) from the environment.
+
+Index: b/Lib/distutils/sysconfig.py
+===================================================================
+--- a/Lib/distutils/sysconfig.py
++++ b/Lib/distutils/sysconfig.py
+@@ -153,8 +153,8 @@
+ varies across Unices and is stored in Python's Makefile.
+ """
+ if compiler.compiler_type == "unix":
+- (cc, cxx, opt, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \
+- get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
++ (cc, cxx, opt, cflags, extra_cflags, basecflags, ccshared, ldshared, so_ext, ar, ar_flags) = \
++ get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', 'EXTRA_CFLAGS', 'BASECFLAGS',
+ 'CCSHARED', 'LDSHARED', 'SO', 'AR',
+ 'ARFLAGS')
+
+@@ -200,8 +200,13 @@
+ cpp = cc + " -E" # not always
+ if 'LDFLAGS' in os.environ:
+ ldshared = ldshared + ' ' + os.environ['LDFLAGS']
++ if 'BASECFLAGS' in os.environ:
++ basecflags = os.environ['BASECFLAGS']
++ if 'OPT' in os.environ:
++ opt = os.environ['OPT']
++ cflags = ' '.join(str(x) for x in (basecflags, opt, extra_cflags) if x)
+ if 'CFLAGS' in os.environ:
+- cflags = opt + ' ' + os.environ['CFLAGS']
++ cflags = ' '.join(str(x) for x in (basecflags, opt, os.environ['CFLAGS'], extra_cflags) if x)
+ ldshared = ldshared + ' ' + os.environ['CFLAGS']
+ if 'CPPFLAGS' in os.environ:
+ cpp = cpp + ' ' + os.environ['CPPFLAGS']
diff --git a/examples/python2.7/patches/do-not-italicize-punctuation.patch b/examples/python2.7/patches/do-not-italicize-punctuation.patch
new file mode 100644
index 0000000..f2c42a2
--- /dev/null
+++ b/examples/python2.7/patches/do-not-italicize-punctuation.patch
@@ -0,0 +1,50 @@
+diff -ru python2.5-2.5.2/Misc/python.man python2.5/Misc/python.man
+--- python2.5-2.5.2/Misc/python.man 2010-04-26 09:17:43.000000000 -0700
++++ python2.5/Misc/python.man 2010-04-26 09:16:39.000000000 -0700
+@@ -243,9 +243,9 @@
+ from that file;
+ when called with
+ .B \-c
+-.I command,
++.IR command ,
+ it executes the Python statement(s) given as
+-.I command.
++.IR command .
+ Here
+ .I command
+ may contain multiple statements separated by newlines.
+@@ -255,7 +255,7 @@
+ .PP
+ If available, the script name and additional arguments thereafter are
+ passed to the script in the Python variable
+-.I sys.argv ,
++.IR sys.argv ,
+ which is a list of strings (you must first
+ .I import sys
+ to be able to access it).
+@@ -269,14 +269,14 @@
+ .I '-c'.
+ Note that options interpreted by the Python interpreter itself
+ are not placed in
+-.I sys.argv.
++.IR sys.argv .
+ .PP
+ In interactive mode, the primary prompt is `>>>'; the second prompt
+ (which appears when a command is not complete) is `...'.
+ The prompts can be changed by assignment to
+ .I sys.ps1
+ or
+-.I sys.ps2.
++.IR sys.ps2 .
+ The interpreter quits when it reads an EOF at a prompt.
+ When an unhandled exception occurs, a stack trace is printed and
+ control returns to the primary prompt; in non-interactive mode, the
+@@ -335,7 +335,7 @@
+ inserted in the path in front of $PYTHONPATH.
+ The search path can be manipulated from within a Python program as the
+ variable
+-.I sys.path .
++.IR sys.path .
+ .IP PYTHONSTARTUP
+ If this is the name of a readable file, the Python commands in that
+ file are executed before the first prompt is displayed in interactive
diff --git a/examples/python2.7/patches/doc-faq.dpatch b/examples/python2.7/patches/doc-faq.dpatch
new file mode 100644
index 0000000..84124a2
--- /dev/null
+++ b/examples/python2.7/patches/doc-faq.dpatch
@@ -0,0 +1,54 @@
+#! /bin/sh -e
+
+# DP: Mention the FAQ on the documentation index page.
+
+dir=
+if [ $# -eq 3 -a "$2" = '-d' ]; then
+ pdir="-d $3"
+ dir="$3/"
+elif [ $# -ne 1 ]; then
+ echo >&2 "usage: `basename $0`: -patch|-unpatch [-d <srcdir>]"
+ exit 1
+fi
+case "$1" in
+ -patch)
+ patch $pdir -f --no-backup-if-mismatch -p0 < $0
+ #cd ${dir}gcc && autoconf
+ ;;
+ -unpatch)
+ patch $pdir -f --no-backup-if-mismatch -R -p0 < $0
+ #rm ${dir}gcc/configure
+ ;;
+ *)
+ echo >&2 "usage: `basename $0`: -patch|-unpatch [-d <srcdir>]"
+ exit 1
+esac
+exit 0
+
+--- Doc/html/index.html.in~ 2002-04-01 18:11:27.000000000 +0200
++++ Doc/html/index.html.in 2003-04-05 13:33:35.000000000 +0200
+@@ -123,6 +123,24 @@
+ </ul>
+ </td>
+ </tr>
++ <tr>
++ <td valign="baseline" class="left-column">
++ &nbsp;
++ <ul>
++ <li> <a href="../../python/FAQ.html" class="title"
++ >FAQ (local copy)</a>
++ <br>(python package must be installed)
++ </ul>
++ </td>
++ <td valign="baseline" class="right-column">
++ &nbsp;
++ <ul>
++ <li> <a href="http://www.python.org/cgi-bin/faqw.py" class="title"
++ >FAQ (online wizard)</a>
++ <br>(maybe more recent)
++ </ul>
++ </td>
++ </tr>
+ </tbody>
+ </table>
+ <p>
diff --git a/examples/python2.7/patches/doc-nodownload.diff b/examples/python2.7/patches/doc-nodownload.diff
new file mode 100644
index 0000000..df5bd4e
--- /dev/null
+++ b/examples/python2.7/patches/doc-nodownload.diff
@@ -0,0 +1,13 @@
+# DP: Don't try to download documentation tools
+
+--- a/Doc/Makefile
++++ b/Doc/Makefile
+@@ -57,7 +57,7 @@
+
+ update: clean checkout
+
+-build: checkout
++build:
+ mkdir -p build/$(BUILDER) build/doctrees
+ $(PYTHON) tools/sphinx-build.py $(ALLSPHINXOPTS)
+ @echo
diff --git a/examples/python2.7/patches/enable-fpectl.diff b/examples/python2.7/patches/enable-fpectl.diff
new file mode 100644
index 0000000..ebcb833
--- /dev/null
+++ b/examples/python2.7/patches/enable-fpectl.diff
@@ -0,0 +1,14 @@
+# DP: Enable the build of the fpectl module.
+
+--- a/setup.py
++++ b/setup.py
+@@ -1276,6 +1276,9 @@
+ else:
+ missing.append('_curses_panel')
+
++ #fpectl fpectlmodule.c ...
++ exts.append( Extension('fpectl', ['fpectlmodule.c']) )
++
+ # Andrew Kuchling's zlib module. Note that some versions of zlib
+ # 1.1.3 have security problems. See CERT Advisory CA-2002-07:
+ # http://www.cert.org/advisories/CA-2002-07.html
diff --git a/examples/python2.7/patches/hg-updates.diff b/examples/python2.7/patches/hg-updates.diff
new file mode 100644
index 0000000..a4f8320
--- /dev/null
+++ b/examples/python2.7/patches/hg-updates.diff
@@ -0,0 +1,27402 @@
+# DP: hg updates of the 2.7 release branch (until 2012-08-15).
+
+# hg diff -r v2.7.3 | filterdiff --exclude=.*ignore --exclude=.hg* --remove-timestamps
+
+diff -r 70274d53c1dd Doc/c-api/buffer.rst
+--- a/Doc/c-api/buffer.rst
++++ b/Doc/c-api/buffer.rst
+@@ -33,7 +33,7 @@
+ Starting from version 1.6, Python has been providing Python-level buffer
+ objects and a C-level buffer API so that any built-in or used-defined type can
+ expose its characteristics. Both, however, have been deprecated because of
+-various shortcomings, and have been officially removed in Python 3.0 in favour
++various shortcomings, and have been officially removed in Python 3 in favour
+ of a new C-level buffer API and a new Python-level object named
+ :class:`memoryview`.
+
+diff -r 70274d53c1dd Doc/c-api/memory.rst
+--- a/Doc/c-api/memory.rst
++++ b/Doc/c-api/memory.rst
+@@ -98,7 +98,7 @@
+
+ Allocates *n* bytes and returns a pointer of type :c:type:`void\*` to the
+ allocated memory, or *NULL* if the request fails. Requesting zero bytes returns
+- a distinct non-*NULL* pointer if possible, as if :c:func:`PyMem_Malloc(1)` had
++ a distinct non-*NULL* pointer if possible, as if ``PyMem_Malloc(1)`` had
+ been called instead. The memory will not have been initialized in any way.
+
+
+@@ -106,7 +106,7 @@
+
+ Resizes the memory block pointed to by *p* to *n* bytes. The contents will be
+ unchanged to the minimum of the old and the new sizes. If *p* is *NULL*, the
+- call is equivalent to :c:func:`PyMem_Malloc(n)`; else if *n* is equal to zero,
++ call is equivalent to ``PyMem_Malloc(n)``; else if *n* is equal to zero,
+ the memory block is resized but is not freed, and the returned pointer is
+ non-*NULL*. Unless *p* is *NULL*, it must have been returned by a previous call
+ to :c:func:`PyMem_Malloc` or :c:func:`PyMem_Realloc`. If the request fails,
+@@ -118,7 +118,7 @@
+
+ Frees the memory block pointed to by *p*, which must have been returned by a
+ previous call to :c:func:`PyMem_Malloc` or :c:func:`PyMem_Realloc`. Otherwise, or
+- if :c:func:`PyMem_Free(p)` has been called before, undefined behavior occurs. If
++ if ``PyMem_Free(p)`` has been called before, undefined behavior occurs. If
+ *p* is *NULL*, no operation is performed.
+
+ The following type-oriented macros are provided for convenience. Note that
+diff -r 70274d53c1dd Doc/c-api/objbuffer.rst
+--- a/Doc/c-api/objbuffer.rst
++++ b/Doc/c-api/objbuffer.rst
+@@ -8,7 +8,7 @@
+
+ This section describes the legacy buffer protocol, which has been introduced
+ in Python 1.6. It is still supported but deprecated in the Python 2.x series.
+-Python 3.0 introduces a new buffer protocol which fixes weaknesses and
++Python 3 introduces a new buffer protocol which fixes weaknesses and
+ shortcomings of the protocol, and has been backported to Python 2.6. See
+ :ref:`bufferobjects` for more information.
+
+diff -r 70274d53c1dd Doc/c-api/typeobj.rst
+--- a/Doc/c-api/typeobj.rst
++++ b/Doc/c-api/typeobj.rst
+@@ -1227,7 +1227,7 @@
+ - If the :const:`Py_TPFLAGS_CHECKTYPES` flag is set, binary and ternary
+ functions must check the type of all their operands, and implement the
+ necessary conversions (at least one of the operands is an instance of the
+- defined type). This is the recommended way; with Python 3.0 coercion will
++ defined type). This is the recommended way; with Python 3 coercion will
+ disappear completely.
+
+ If the operation is not defined for the given operands, binary and ternary
+diff -r 70274d53c1dd Doc/distutils/apiref.rst
+--- a/Doc/distutils/apiref.rst
++++ b/Doc/distutils/apiref.rst
+@@ -444,7 +444,9 @@
+ Define a preprocessor macro for all compilations driven by this compiler object.
+ The optional parameter *value* should be a string; if it is not supplied, then
+ the macro will be defined without an explicit value and the exact outcome
+- depends on the compiler used (XXX true? does ANSI say anything about this?)
++ depends on the compiler used.
++
++ .. XXX true? does ANSI say anything about this?
+
+
+ .. method:: CCompiler.undefine_macro(name)
+@@ -598,7 +600,9 @@
+
+ *output_libname* should be a library name, not a filename; the filename will be
+ inferred from the library name. *output_dir* is the directory where the library
+- file will be put. XXX defaults to what?
++ file will be put.
++
++ .. XXX defaults to what?
+
+ *debug* is a boolean; if true, debugging information will be included in the
+ library (note that on most platforms, it is the compile step where this matters:
+@@ -718,30 +722,29 @@
+
+ Invokes :func:`distutils.util.execute` This method invokes a Python function
+ *func* with the given arguments *args*, after logging and taking into account
+- the *dry_run* flag. XXX see also.
++ the *dry_run* flag.
+
+
+ .. method:: CCompiler.spawn(cmd)
+
+ Invokes :func:`distutils.util.spawn`. This invokes an external process to run
+- the given command. XXX see also.
++ the given command.
+
+
+ .. method:: CCompiler.mkpath(name[, mode=511])
+
+ Invokes :func:`distutils.dir_util.mkpath`. This creates a directory and any
+- missing ancestor directories. XXX see also.
++ missing ancestor directories.
+
+
+ .. method:: CCompiler.move_file(src, dst)
+
+- Invokes :meth:`distutils.file_util.move_file`. Renames *src* to *dst*. XXX see
+- also.
++ Invokes :meth:`distutils.file_util.move_file`. Renames *src* to *dst*.
+
+
+ .. method:: CCompiler.announce(msg[, level=1])
+
+- Write a message using :func:`distutils.log.debug`. XXX see also.
++ Write a message using :func:`distutils.log.debug`.
+
+
+ .. method:: CCompiler.warn(msg)
+@@ -869,8 +872,6 @@
+ prefix of all files and directories in the archive. *root_dir* and *base_dir*
+ both default to the current directory. Returns the name of the archive file.
+
+- .. XXX This should be changed to support bz2 files.
+-
+
+ .. function:: make_tarball(base_name, base_dir[, compress='gzip', verbose=0, dry_run=0])
+
+@@ -882,8 +883,6 @@
+ possibly plus the appropriate compression extension (:file:`.gz`, :file:`.bz2`
+ or :file:`.Z`). Return the output filename.
+
+- .. XXX This should be replaced with calls to the :mod:`tarfile` module.
+-
+
+ .. function:: make_zipfile(base_name, base_dir[, verbose=0, dry_run=0])
+
+@@ -974,8 +973,8 @@
+ Copy an entire directory tree *src* to a new location *dst*. Both *src* and
+ *dst* must be directory names. If *src* is not a directory, raise
+ :exc:`DistutilsFileError`. If *dst* does not exist, it is created with
+- :func:`mkpath`. The end result of the copy is that every file in *src* is
+- copied to *dst*, and directories under *src* are recursively copied to *dst*.
++ :func:`mkpath`. The end result of the copy is that every file in *src* is
++ copied to *dst*, and directories under *src* are recursively copied to *dst*.
+ Return the list of files that were copied or might have been copied, using their
+ output name. The return value is unaffected by *update* or *dry_run*: it is
+ simply the list of all files under *src*, with the names changed to be under
+@@ -988,6 +987,10 @@
+ destination of the symlink will be copied. *update* and *verbose* are the same
+ as for :func:`copy_file`.
+
++ Files in *src* that begin with :file:`.nfs` are skipped (more information on
++ these files is available in answer D2 of the `NFS FAQ page
++ <http://nfs.sourceforge.net/#section_d>`_.
++
+
+ .. function:: remove_tree(directory[, verbose=0, dry_run=0])
+
+@@ -995,8 +998,6 @@
+ errors are ignored (apart from being reported to ``sys.stdout`` if *verbose* is
+ true).
+
+-.. XXX Some of this could be replaced with the shutil module?
+-
+
+ :mod:`distutils.file_util` --- Single file operations
+ =====================================================
+@@ -1110,8 +1111,6 @@
+
+ * ``macosx-10.6-intel``
+
+- .. % XXX isn't this also provided by some other non-distutils module?
+-
+
+ .. function:: convert_path(pathname)
+
+@@ -1311,8 +1310,6 @@
+ the "negative alias" of :option:`--verbose`, then :option:`--quiet` on the
+ command line sets *verbose* to false.
+
+-.. XXX Should be replaced with :mod:`optparse`.
+-
+
+ .. function:: fancy_getopt(options, negative_opt, object, args)
+
+@@ -1329,8 +1326,6 @@
+
+ Wraps *text* to less than *width* wide.
+
+- .. XXX Should be replaced with :mod:`textwrap` (which is available in Python
+- 2.3 and later).
+
+
+ .. class:: FancyGetopt([option_table=None])
+@@ -1394,10 +1389,6 @@
+ :synopsis: A simple logging mechanism, 282-style
+
+
+-.. XXX Should be replaced with standard :mod:`logging` module.
+-
+-
+-
+ :mod:`distutils.spawn` --- Spawn a sub-process
+ ==============================================
+
+@@ -1894,9 +1885,6 @@
+ :synopsis: Build the .py/.pyc files of a package
+
+
+-.. % todo
+-
+-
+ :mod:`distutils.command.build_scripts` --- Build the scripts of a package
+ =========================================================================
+
+diff -r 70274d53c1dd Doc/distutils/sourcedist.rst
+--- a/Doc/distutils/sourcedist.rst
++++ b/Doc/distutils/sourcedist.rst
+@@ -51,8 +51,7 @@
+ of the standard Python library since Python 1.6)
+
+ (4)
+- requires the :program:`compress` program. Notice that this format is now
+- pending for deprecation and will be removed in the future versions of Python.
++ requires the :program:`compress` program.
+
+ When using any ``tar`` format (``gztar``, ``bztar``, ``ztar`` or
+ ``tar``) under Unix, you can specify the ``owner`` and ``group`` names
+diff -r 70274d53c1dd Doc/distutils/uploading.rst
+--- a/Doc/distutils/uploading.rst
++++ b/Doc/distutils/uploading.rst
+@@ -74,4 +74,9 @@
+
+ $ python setup.py --long-description | rst2html.py > output.html
+
+-:mod:`docutils` will display a warning if there's something wrong with your syntax.
++:mod:`docutils` will display a warning if there's something wrong with your
++syntax. Because PyPI applies additional checks (e.g. by passing ``--no-raw``
++to ``rst2html.py`` in the command above), being able to run the command above
++without warnings does not guarantee that PyPI will convert the content
++successfully.
++
+diff -r 70274d53c1dd Doc/extending/newtypes.rst
+--- a/Doc/extending/newtypes.rst
++++ b/Doc/extending/newtypes.rst
+@@ -1521,9 +1521,8 @@
+ }
+
+ The only further addition is that the destructor needs to call the weak
+-reference manager to clear any weak references. This should be done before any
+-other parts of the destruction have occurred, but is only required if the weak
+-reference list is non-*NULL*::
++reference manager to clear any weak references. This is only required if the
++weak reference list is non-*NULL*::
+
+ static void
+ instance_dealloc(PyInstanceObject *inst)
+diff -r 70274d53c1dd Doc/faq/design.rst
+--- a/Doc/faq/design.rst
++++ b/Doc/faq/design.rst
+@@ -297,8 +297,9 @@
+ How fast are exceptions?
+ ------------------------
+
+-A try/except block is extremely efficient. Actually catching an exception is
+-expensive. In versions of Python prior to 2.0 it was common to use this idiom::
++A try/except block is extremely efficient if no exceptions are raised. Actually
++catching an exception is expensive. In versions of Python prior to 2.0 it was
++common to use this idiom::
+
+ try:
+ value = mydict[key]
+@@ -309,11 +310,10 @@
+ This only made sense when you expected the dict to have the key almost all the
+ time. If that wasn't the case, you coded it like this::
+
+- if mydict.has_key(key):
++ if key in mydict:
+ value = mydict[key]
+ else:
+- mydict[key] = getvalue(key)
+- value = mydict[key]
++ value = mydict[key] = getvalue(key)
+
+ .. note::
+
+diff -r 70274d53c1dd Doc/faq/library.rst
+--- a/Doc/faq/library.rst
++++ b/Doc/faq/library.rst
+@@ -14,7 +14,7 @@
+
+ Check :ref:`the Library Reference <library-index>` to see if there's a relevant
+ standard library module. (Eventually you'll learn what's in the standard
+-library and will able to skip this step.)
++library and will be able to skip this step.)
+
+ For third-party packages, search the `Python Package Index
+ <http://pypi.python.org/pypi>`_ or try `Google <http://www.google.com>`_ or
+@@ -28,7 +28,7 @@
+ If you can't find a source file for a module it may be a built-in or
+ dynamically loaded module implemented in C, C++ or other compiled language.
+ In this case you may not have the source file or it may be something like
+-mathmodule.c, somewhere in a C source directory (not on the Python Path).
++:file:`mathmodule.c`, somewhere in a C source directory (not on the Python Path).
+
+ There are (at least) three kinds of modules in Python:
+
+@@ -60,18 +60,18 @@
+ interpreter is installed on your platform.
+
+ If you would like the script to be independent of where the Python interpreter
+-lives, you can use the "env" program. Almost all Unix variants support the
+-following, assuming the Python interpreter is in a directory on the user's
+-$PATH::
++lives, you can use the :program:`env` program. Almost all Unix variants support
++the following, assuming the Python interpreter is in a directory on the user's
++:envvar:`PATH`::
+
+ #!/usr/bin/env python
+
+-*Don't* do this for CGI scripts. The $PATH variable for CGI scripts is often
+-very minimal, so you need to use the actual absolute pathname of the
++*Don't* do this for CGI scripts. The :envvar:`PATH` variable for CGI scripts is
++often very minimal, so you need to use the actual absolute pathname of the
+ interpreter.
+
+-Occasionally, a user's environment is so full that the /usr/bin/env program
+-fails; or there's no env program at all. In that case, you can try the
++Occasionally, a user's environment is so full that the :program:`/usr/bin/env`
++program fails; or there's no env program at all. In that case, you can try the
+ following hack (due to Alex Rezinsky)::
+
+ #! /bin/sh
+@@ -91,12 +91,12 @@
+
+ .. XXX curses *is* built by default, isn't it?
+
+-For Unix variants: The standard Python source distribution comes with a curses
+-module in the ``Modules/`` subdirectory, though it's not compiled by default
+-(note that this is not available in the Windows distribution -- there is no
+-curses module for Windows).
++For Unix variants the standard Python source distribution comes with a curses
++module in the :source:`Modules` subdirectory, though it's not compiled by default.
++(Note that this is not available in the Windows distribution -- there is no
++curses module for Windows.)
+
+-The curses module supports basic curses features as well as many additional
++The :mod:`curses` module supports basic curses features as well as many additional
+ functions from ncurses and SYSV curses such as colour, alternative character set
+ support, pads, and mouse support. This means the module isn't compatible with
+ operating systems that only have BSD curses, but there don't seem to be any
+@@ -110,7 +110,7 @@
+ -------------------------------------------------
+
+ The :mod:`atexit` module provides a register function that is similar to C's
+-onexit.
++:c:func:`onexit`.
+
+
+ Why don't my signal handlers work?
+@@ -140,8 +140,8 @@
+ The :mod:`unittest` module is a fancier testing framework modelled on Java and
+ Smalltalk testing frameworks.
+
+-For testing, it helps to write the program so that it may be easily tested by
+-using good modular design. Your program should have almost all functionality
++To make testing easier, you should use good modular design in your program.
++Your program should have almost all functionality
+ encapsulated in either functions or class methods -- and this sometimes has the
+ surprising and delightful effect of making the program run faster (because local
+ variable accesses are faster than global accesses). Furthermore the program
+@@ -157,7 +157,7 @@
+
+ Once your program is organized as a tractable collection of functions and class
+ behaviours you should write test functions that exercise the behaviours. A test
+-suite can be associated with each module which automates a sequence of tests.
++suite that automates a sequence of tests can be associated with each module.
+ This sounds like a lot of work, but since Python is so terse and flexible it's
+ surprisingly easy. You can make coding much more pleasant and fun by writing
+ your test functions in parallel with the "production code", since this makes it
+@@ -186,7 +186,7 @@
+ How do I get a single keypress at a time?
+ -----------------------------------------
+
+-For Unix variants: There are several solutions. It's straightforward to do this
++For Unix variants there are several solutions. It's straightforward to do this
+ using curses, but curses is a fairly large module to learn. Here's a solution
+ without curses::
+
+@@ -273,7 +273,7 @@
+
+ time.sleep(10)
+
+-Instead of trying to guess how long a :func:`time.sleep` delay will be enough,
++Instead of trying to guess a good delay value for :func:`time.sleep`,
+ it's better to use some kind of semaphore mechanism. One idea is to use the
+ :mod:`Queue` module to create a queue object, let each thread append a token to
+ the queue when it finishes, and let the main thread read as many tokens from the
+@@ -284,10 +284,10 @@
+ ---------------------------------------------------------
+
+ Use the :mod:`Queue` module to create a queue containing a list of jobs. The
+-:class:`~Queue.Queue` class maintains a list of objects with ``.put(obj)`` to
+-add an item to the queue and ``.get()`` to return an item. The class will take
+-care of the locking necessary to ensure that each job is handed out exactly
+-once.
++:class:`~Queue.Queue` class maintains a list of objects and has a ``.put(obj)``
++method that adds items to the queue and a ``.get()`` method to return them.
++The class will take care of the locking necessary to ensure that each job is
++handed out exactly once.
+
+ Here's a trivial example::
+
+@@ -296,7 +296,7 @@
+ # The worker thread gets jobs off the queue. When the queue is empty, it
+ # assumes there will be no more work and exits.
+ # (Realistically workers will run until terminated.)
+- def worker ():
++ def worker():
+ print 'Running worker'
+ time.sleep(0.1)
+ while True:
+@@ -329,6 +329,8 @@
+
+ When run, this will produce the following output:
+
++.. code-block:: none
++
+ Running worker
+ Running worker
+ Running worker
+@@ -343,15 +345,15 @@
+ Worker <Thread(worker 1, started)> running with argument 5
+ ...
+
+-Consult the module's documentation for more details; the ``Queue`` class
+-provides a featureful interface.
++Consult the module's documentation for more details; the :class:`~Queue.Queue`
++class provides a featureful interface.
+
+
+ What kinds of global value mutation are thread-safe?
+ ----------------------------------------------------
+
+-A global interpreter lock (GIL) is used internally to ensure that only one
+-thread runs in the Python VM at a time. In general, Python offers to switch
++A :term:`global interpreter lock` (GIL) is used internally to ensure that only
++one thread runs in the Python VM at a time. In general, Python offers to switch
+ among threads only between bytecode instructions; how frequently it switches can
+ be set via :func:`sys.setcheckinterval`. Each bytecode instruction and
+ therefore all the C implementation code reached from each instruction is
+@@ -396,7 +398,7 @@
+ .. XXX mention multiprocessing
+ .. XXX link to dbeazley's talk about GIL?
+
+-The Global Interpreter Lock (GIL) is often seen as a hindrance to Python's
++The :term:`global interpreter lock` (GIL) is often seen as a hindrance to Python's
+ deployment on high-end multiprocessor server machines, because a multi-threaded
+ Python program effectively only uses one CPU, due to the insistence that
+ (almost) all Python code can only run while the GIL is held.
+@@ -459,7 +461,7 @@
+ To truncate a file, open it using ``f = open(filename, "r+")``, and use
+ ``f.truncate(offset)``; offset defaults to the current seek position. There's
+ also ``os.ftruncate(fd, offset)`` for files opened with :func:`os.open`, where
+-``fd`` is the file descriptor (a small integer).
++*fd* is the file descriptor (a small integer).
+
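
A minimal sketch of both approaches (assuming a writable file named
``data.txt`` already exists)::

    import os

    # High-level: truncate at byte offset 100 using a file object.
    f = open("data.txt", "r+")
    f.seek(100)
    f.truncate()        # offset defaults to the current seek position
    f.close()

    # Low-level: the same operation through a raw file descriptor.
    fd = os.open("data.txt", os.O_RDWR)
    os.ftruncate(fd, 100)
    os.close(fd)
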
+ The :mod:`shutil` module also contains a number of functions to work on files
+ including :func:`~shutil.copyfile`, :func:`~shutil.copytree`, and
+@@ -493,7 +495,7 @@
+ "short integer" (2 bytes), and 'l' reads one "long integer" (4 bytes) from the
+ string.
+
+-For data that is more regular (e.g. a homogeneous list of ints or thefloats),
++For data that is more regular (e.g. a homogeneous list of ints or floats),
+ you can also use the :mod:`array` module.
+
+
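
As a hedged illustration of both approaches (the byte string below is made up
purely for the example)::

    import struct
    import array

    # struct: two little-endian shorts followed by a long, read from a byte string.
    data = '\x01\x00\x02\x00\x03\x00\x00\x00'
    a, b, c = struct.unpack('<hhl', data)     # (1, 2, 3)

    # array: a homogeneous block of integers, convertible back to a byte string.
    arr = array.array('l', [1, 2, 3])
    raw = arr.tostring()
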
+@@ -503,7 +505,7 @@
+ :func:`os.read` is a low-level function which takes a file descriptor, a small
+ integer representing the opened file. :func:`os.popen` creates a high-level
+ file object, the same type returned by the built-in :func:`open` function.
+-Thus, to read n bytes from a pipe p created with :func:`os.popen`, you need to
++Thus, to read *n* bytes from a pipe *p* created with :func:`os.popen`, you need to
+ use ``p.read(n)``.
+
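
A small illustrative sketch (the command shown is arbitrary)::

    import os

    p = os.popen("echo hello world")   # high-level file object
    first = p.read(5)                  # reads 'hello'
    rest = p.read()                    # reads whatever is left
    p.close()
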
+
+@@ -522,9 +524,9 @@
+
+ Warning: in general it is unwise to do this because you can easily cause a
+ deadlock where your process is blocked waiting for output from the child while
+-the child is blocked waiting for input from you. This can be caused because the
+-parent expects the child to output more text than it does, or it can be caused
+-by data being stuck in stdio buffers due to lack of flushing. The Python parent
++the child is blocked waiting for input from you. This can be caused by the
++parent expecting the child to output more text than it does or by data being
++stuck in stdio buffers due to lack of flushing. The Python parent
+ can of course explicitly flush the data it sends to the child before it reads
+ any output, but if the child is a naive C program it may have been written to
+ never explicitly flush its output, even if it is interactive, since flushing is
+@@ -544,8 +546,8 @@
+ In many cases, all you really need is to run some data through a command and get
+ the result back. Unless the amount of data is very large, the easiest way to do
+ this is to write it to a temporary file and run the command with that temporary
+-file as input. The standard module :mod:`tempfile` exports a ``mktemp()``
+-function to generate unique temporary file names. ::
++file as input. The standard module :mod:`tempfile` exports a
++:func:`~tempfile.mktemp` function to generate unique temporary file names. ::
+
+ import tempfile
+ import os
+@@ -673,15 +675,12 @@
+ sys.stdout.write(httpobj.getfile().read())
+
+ Note that in general for percent-encoded POST operations, query strings must be
+-quoted using :func:`urllib.quote`. For example to send name="Guy Steele, Jr."::
++quoted using :func:`urllib.urlencode`. For example, to send
++``name=Guy Steele, Jr.``::
+
+- >>> from urllib import quote
+- >>> x = quote("Guy Steele, Jr.")
+- >>> x
+- 'Guy%20Steele,%20Jr.'
+- >>> query_string = "name="+x
+- >>> query_string
+- 'name=Guy%20Steele,%20Jr.'
++ >>> import urllib
++ >>> urllib.urlencode({'name': 'Guy Steele, Jr.'})
++ 'name=Guy+Steele%2C+Jr.'
+
+
+ What module should I use to help with generating HTML?
+@@ -689,19 +688,8 @@
+
+ .. XXX add modern template languages
+
+-There are many different modules available:
+-
+-* HTMLgen is a class library of objects corresponding to all the HTML 3.2 markup
+- tags. It's used when you are writing in Python and wish to synthesize HTML
+- pages for generating a web or for CGI forms, etc.
+-
+-* DocumentTemplate and Zope Page Templates are two different systems that are
+- part of Zope.
+-
+-* Quixote's PTL uses Python syntax to assemble strings of text.
+-
+-Consult the `Web Programming wiki pages
+-<http://wiki.python.org/moin/WebProgramming>`_ for more links.
++You can find a collection of useful links on the `Web Programming wiki page
++<http://wiki.python.org/moin/WebProgramming>`_.
+
+
+ How do I send mail from a Python script?
+@@ -730,7 +718,7 @@
+ server.quit()
+
+ A Unix-only alternative uses sendmail. The location of the sendmail program
+-varies between systems; sometimes it is ``/usr/lib/sendmail``, sometime
++varies between systems; sometimes it is ``/usr/lib/sendmail``, sometimes
+ ``/usr/sbin/sendmail``. The sendmail manual page will help you out. Here's
+ some sample code::
+
+@@ -797,7 +785,7 @@
+ The :mod:`marshal` module provides very fast ways to store noncircular basic
+ Python types to files and strings, and back again. Although marshal does not do
+ fancy things like store instances or handle shared references properly, it does
+-run extremely fast. For example loading a half megabyte of data may take less
++run extremely fast. For example, loading a half megabyte of data may take less
+ than a third of a second. This often beats doing something more complex and
+ general such as using gdbm with pickle/shelve.
+
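
A rough sketch of the round trip (the file name is arbitrary)::

    import marshal

    data = {'spam': [1, 2, 3], 'eggs': (4.0, 5.0)}

    f = open("data.marshal", "wb")
    marshal.dump(data, f)       # write basic Python objects quickly
    f.close()

    f = open("data.marshal", "rb")
    restored = marshal.load(f)  # read them back
    f.close()
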
+@@ -807,9 +795,9 @@
+
+ .. XXX update this, default protocol is 2/3
+
+-The default format used by the pickle module is a slow one that results in
+-readable pickles. Making it the default, but it would break backward
+-compatibility::
++By default :mod:`pickle` uses a relatively old and slow format for backward
++compatibility. You can however specify other protocol versions that are
++faster::
+
+ largeString = 'z' * (100 * 1024)
+ myPickle = cPickle.dumps(largeString, protocol=1)
+diff -r 70274d53c1dd Doc/glossary.rst
+--- a/Doc/glossary.rst
++++ b/Doc/glossary.rst
+@@ -80,7 +80,7 @@
+
+ classic class
+ Any class which does not inherit from :class:`object`. See
+- :term:`new-style class`. Classic classes will be removed in Python 3.0.
++ :term:`new-style class`. Classic classes have been removed in Python 3.
+
+ coercion
+ The implicit conversion of an instance of one type to another during an
+@@ -152,9 +152,9 @@
+ For more information about descriptors' methods, see :ref:`descriptors`.
+
+ dictionary
+- An associative array, where arbitrary keys are mapped to values. The keys
+- can be any object with :meth:`__hash__` function and :meth:`__eq__`
+- methods. Called a hash in Perl.
++ An associative array, where arbitrary keys are mapped to values. The
++ keys can be any object with :meth:`__hash__` and :meth:`__eq__` methods.
++ Called a hash in Perl.
+
+ docstring
+ A string literal which appears as the first expression in a class,
+@@ -200,7 +200,7 @@
+ An object exposing a file-oriented API (with methods such as
+ :meth:`read()` or :meth:`write()`) to an underlying resource. Depending
+ on the way it was created, a file object can mediate access to a real
+- on-disk file or to another other type of storage or communication device
++ on-disk file or to another type of storage or communication device
+ (for example standard input/output, in-memory buffers, sockets, pipes,
+ etc.). File objects are also called :dfn:`file-like objects` or
+ :dfn:`streams`.
+@@ -406,7 +406,7 @@
+ :meth:`str.lower` method can serve as a key function for case insensitive
+ sorts. Alternatively, an ad-hoc key function can be built from a
+ :keyword:`lambda` expression such as ``lambda r: (r[0], r[2])``. Also,
+- the :mod:`operator` module provides three key function constuctors:
++ the :mod:`operator` module provides three key function constructors:
+ :func:`~operator.attrgetter`, :func:`~operator.itemgetter`, and
+ :func:`~operator.methodcaller`. See the :ref:`Sorting HOW TO
+ <sortinghowto>` for examples of how to create and use key functions.
+diff -r 70274d53c1dd Doc/howto/advocacy.rst
+--- a/Doc/howto/advocacy.rst
++++ b/Doc/howto/advocacy.rst
+@@ -264,8 +264,7 @@
+
+ **What are the restrictions on Python's use?**
+
+-They're practically nonexistent. Consult the :file:`Misc/COPYRIGHT` file in the
+-source distribution, or the section :ref:`history-and-license` for the full
++They're practically nonexistent. Consult :ref:`history-and-license` for the full
+ language, but it boils down to three conditions:
+
+ * You have to leave the copyright notice on the software; if you don't include
+diff -r 70274d53c1dd Doc/howto/argparse.rst
+--- /dev/null
++++ b/Doc/howto/argparse.rst
+@@ -0,0 +1,764 @@
++*****************
++Argparse Tutorial
++*****************
++
++:author: Tshepang Lekhonkhobe <tshepang@gmail.com>
++
++.. _argparse-tutorial:
++
++This tutorial is intended to be a gentle introduction to :mod:`argparse`, the
++recommended command-line parsing module in the Python standard library.
++
++.. note::
++
++    There are two other modules that fulfill the same task, namely
++ :mod:`getopt` (an equivalent for :c:func:`getopt` from the C
++ language) and the deprecated :mod:`optparse`.
++ Note also that :mod:`argparse` is based on :mod:`optparse`,
++ and therefore very similar in terms of usage.
++
++
++Concepts
++========
++
++Let's show the sort of functionality that we are going to explore in this
++introductory tutorial by making use of the :command:`ls` command:
++
++.. code-block:: sh
++
++ $ ls
++ cpython devguide prog.py pypy rm-unused-function.patch
++ $ ls pypy
++ ctypes_configure demo dotviewer include lib_pypy lib-python ...
++ $ ls -l
++ total 20
++ drwxr-xr-x 19 wena wena 4096 Feb 18 18:51 cpython
++ drwxr-xr-x 4 wena wena 4096 Feb 8 12:04 devguide
++ -rwxr-xr-x 1 wena wena 535 Feb 19 00:05 prog.py
++ drwxr-xr-x 14 wena wena 4096 Feb 7 00:59 pypy
++ -rw-r--r-- 1 wena wena 741 Feb 18 01:01 rm-unused-function.patch
++ $ ls --help
++ Usage: ls [OPTION]... [FILE]...
++ List information about the FILEs (the current directory by default).
++ Sort entries alphabetically if none of -cftuvSUX nor --sort is specified.
++ ...
++
++A few concepts we can learn from the four commands:
++
++* The :command:`ls` command is useful when run without any options at all. It defaults
++ to displaying the contents of the current directory.
++
++* If we want more than it provides by default, we tell it a bit more. In
++ this case, we want it to display a different directory, ``pypy``.
++ What we did is specify what is known as a positional argument. It's named so
++ because the program should know what to do with the value, solely based on
++ where it appears on the command line. This concept is more relevant
++ to a command like :command:`cp`, whose most basic usage is ``cp SRC DEST``.
++ The first position is *what you want copied,* and the second
++ position is *where you want it copied to*.
++
++* Now, say we want to change behaviour of the program. In our example,
++ we display more info for each file instead of just showing the file names.
++ The ``-l`` in that case is known as an optional argument.
++
++* That's a snippet of the help text. It's very useful in that you can
++ come across a program you have never used before, and can figure out
++  how it works simply by reading its help text.
++
++
++The basics
++==========
++
++Let us start with a very simple example which does (almost) nothing::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.parse_args()
++
++Following is a result of running the code:
++
++.. code-block:: sh
++
++ $ python prog.py
++ $ python prog.py --help
++ usage: prog.py [-h]
++
++ optional arguments:
++ -h, --help show this help message and exit
++ $ python prog.py --verbose
++ usage: prog.py [-h]
++ prog.py: error: unrecognized arguments: --verbose
++ $ python prog.py foo
++ usage: prog.py [-h]
++ prog.py: error: unrecognized arguments: foo
++
++Here is what is happening:
++
++* Running the script without any options results in nothing displayed to
++ stdout. Not so useful.
++
++* The second one starts to display the usefulness of the :mod:`argparse`
++ module. We have done almost nothing, but already we get a nice help message.
++
++* The ``--help`` option, which can also be shortened to ``-h``, is the only
++ option we get for free (i.e. no need to specify it). Specifying anything
++ else results in an error. But even then, we do get a useful usage message,
++ also for free.
++
++
++Introducing Positional arguments
++================================
++
++An example::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.add_argument("echo")
++ args = parser.parse_args()
++ print args.echo
++
++And running the code:
++
++.. code-block:: sh
++
++ $ python prog.py
++ usage: prog.py [-h] echo
++ prog.py: error: the following arguments are required: echo
++ $ python prog.py --help
++ usage: prog.py [-h] echo
++
++ positional arguments:
++ echo
++
++ optional arguments:
++ -h, --help show this help message and exit
++ $ python prog.py foo
++ foo
++
++Here is what's happening:
++
++* We've added the :meth:`add_argument` method, which is what we use to specify
++ which command-line options the program is willing to accept. In this case,
++ I've named it ``echo`` so that it's in line with its function.
++
++* Calling our program now requires us to specify an option.
++
++* The :meth:`parse_args` method actually returns some data from the
++ options specified, in this case, ``echo``.
++
++* The variable is some form of 'magic' that :mod:`argparse` performs for free
++ (i.e. no need to specify which variable that value is stored in).
++ You will also notice that its name matches the string argument given
++ to the method, ``echo``.
++
++Note however that, although the help display looks nice and all, it currently
++is not as helpful as it can be. For example we see that we got ``echo`` as a
++positional argument, but we don't know what it does, other than by guessing or
++by reading the source code. So, let's make it a bit more useful::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.add_argument("echo", help="echo the string you use here")
++ args = parser.parse_args()
++ print args.echo
++
++And we get:
++
++.. code-block:: sh
++
++ $ python prog.py -h
++ usage: prog.py [-h] echo
++
++ positional arguments:
++ echo echo the string you use here
++
++ optional arguments:
++ -h, --help show this help message and exit
++
++Now, how about doing something even more useful::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.add_argument("square", help="display a square of a given number")
++ args = parser.parse_args()
++ print args.square**2
++
++Following is a result of running the code:
++
++.. code-block:: sh
++
++ $ python prog.py 4
++ Traceback (most recent call last):
++ File "prog.py", line 5, in <module>
++ print args.square**2
++ TypeError: unsupported operand type(s) for ** or pow(): 'str' and 'int'
++
++That didn't go so well. That's because :mod:`argparse` treats the options we
++give it as strings, unless we tell it otherwise. So, let's tell
++:mod:`argparse` to treat that input as an integer::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.add_argument("square", help="display a square of a given number",
++ type=int)
++ args = parser.parse_args()
++ print args.square**2
++
++Following is a result of running the code:
++
++.. code-block:: sh
++
++ $ python prog.py 4
++ 16
++ $ python prog.py four
++ usage: prog.py [-h] square
++ prog.py: error: argument square: invalid int value: 'four'
++
++That went well. The program now even helpfully quits on bad input
++before proceeding.
++
++
++Introducing Optional arguments
++==============================
++
++So far we have been playing with positional arguments. Let us
++have a look at how to add optional ones::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.add_argument("--verbosity", help="increase output verbosity")
++ args = parser.parse_args()
++ if args.verbosity:
++ print "verbosity turned on"
++
++And the output:
++
++.. code-block:: sh
++
++ $ python prog.py --verbosity 1
++ verbosity turned on
++ $ python prog.py
++ $ python prog.py --help
++ usage: prog.py [-h] [--verbosity VERBOSITY]
++
++ optional arguments:
++ -h, --help show this help message and exit
++ --verbosity VERBOSITY
++ increase output verbosity
++ $ python prog.py --verbosity
++ usage: prog.py [-h] [--verbosity VERBOSITY]
++ prog.py: error: argument --verbosity: expected one argument
++
++Here is what is happening:
++
++* The program is written so as to display something when ``--verbosity`` is
++ specified and display nothing when not.
++
++* To show that the option is actually optional, there is no error when running
++ the program without it. Note that by default, if an optional argument isn't
++ used, the relevant variable, in this case :attr:`args.verbosity`, is
++ given ``None`` as a value, which is the reason it fails the truth
++ test of the :keyword:`if` statement.
++
++* The help message is a bit different.
++
++* When using the ``--verbosity`` option, one must also specify some value,
++ any value.
++
++The above example accepts arbitrary integer values for ``--verbosity``, but for
++our simple program, only two values are actually useful, ``True`` or ``False``.
++Let's modify the code accordingly::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.add_argument("--verbose", help="increase output verbosity",
++ action="store_true")
++ args = parser.parse_args()
++ if args.verbose:
++ print "verbosity turned on"
++
++And the output:
++
++.. code-block:: sh
++
++ $ python prog.py --verbose
++ verbosity turned on
++ $ python prog.py --verbose 1
++ usage: prog.py [-h] [--verbose]
++ prog.py: error: unrecognized arguments: 1
++ $ python prog.py --help
++ usage: prog.py [-h] [--verbose]
++
++ optional arguments:
++ -h, --help show this help message and exit
++ --verbose increase output verbosity
++
++Here is what is happening:
++
++* The option is now more of a flag than something that requires a value.
++ We even changed the name of the option to match that idea.
++ Note that we now specify a new keyword, ``action``, and give it the value
++ ``"store_true"``. This means that, if the option is specified,
++ assign the value ``True`` to :data:`args.verbose`.
++ Not specifying it implies ``False``.
++
++* It complains when you specify a value, in true spirit of what flags
++ actually are.
++
++* Notice the different help text.
++
++
++Short options
++-------------
++
++If you are familiar with command line usage,
++you will notice that I haven't yet touched on the topic of short
++versions of the options. It's quite simple::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.add_argument("-v", "--verbose", help="increase output verbosity",
++ action="store_true")
++ args = parser.parse_args()
++ if args.verbose:
++ print "verbosity turned on"
++
++And here goes:
++
++.. code-block:: sh
++
++ $ python prog.py -v
++ verbosity turned on
++ $ python prog.py --help
++ usage: prog.py [-h] [-v]
++
++ optional arguments:
++ -h, --help show this help message and exit
++ -v, --verbose increase output verbosity
++
++Note that the new ability is also reflected in the help text.
++
++
++Combining Positional and Optional arguments
++===========================================
++
++Our program keeps growing in complexity::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.add_argument("square", type=int,
++ help="display a square of a given number")
++ parser.add_argument("-v", "--verbose", action="store_true",
++ help="increase output verbosity")
++ args = parser.parse_args()
++ answer = args.square**2
++ if args.verbose:
++ print "the square of {} equals {}".format(args.square, answer)
++ else:
++ print answer
++
++And now the output:
++
++.. code-block:: sh
++
++ $ python prog.py
++ usage: prog.py [-h] [-v] square
++ prog.py: error: the following arguments are required: square
++ $ python prog.py 4
++ 16
++ $ python prog.py 4 --verbose
++ the square of 4 equals 16
++ $ python prog.py --verbose 4
++ the square of 4 equals 16
++
++* We've brought back a positional argument, hence the complaint.
++
++* Note that the order does not matter.
++
++How about we give this program of ours back the ability to have
++multiple verbosity values, and actually get to use them::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.add_argument("square", type=int,
++ help="display a square of a given number")
++ parser.add_argument("-v", "--verbosity", type=int,
++ help="increase output verbosity")
++ args = parser.parse_args()
++ answer = args.square**2
++ if args.verbosity == 2:
++ print "the square of {} equals {}".format(args.square, answer)
++ elif args.verbosity == 1:
++ print "{}^2 == {}".format(args.square, answer)
++ else:
++ print answer
++
++And the output:
++
++.. code-block:: sh
++
++ $ python prog.py 4
++ 16
++ $ python prog.py 4 -v
++ usage: prog.py [-h] [-v VERBOSITY] square
++ prog.py: error: argument -v/--verbosity: expected one argument
++ $ python prog.py 4 -v 1
++ 4^2 == 16
++ $ python prog.py 4 -v 2
++ the square of 4 equals 16
++ $ python prog.py 4 -v 3
++ 16
++
++These all look good except the last one, which exposes a bug in our program.
++Let's fix it by restricting the values the ``--verbosity`` option can accept::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.add_argument("square", type=int,
++ help="display a square of a given number")
++ parser.add_argument("-v", "--verbosity", type=int, choices=[0, 1, 2],
++ help="increase output verbosity")
++ args = parser.parse_args()
++ answer = args.square**2
++ if args.verbosity == 2:
++ print "the square of {} equals {}".format(args.square, answer)
++ elif args.verbosity == 1:
++ print "{}^2 == {}".format(args.square, answer)
++ else:
++ print answer
++
++And the output:
++
++.. code-block:: sh
++
++ $ python prog.py 4 -v 3
++ usage: prog.py [-h] [-v {0,1,2}] square
++ prog.py: error: argument -v/--verbosity: invalid choice: 3 (choose from 0, 1, 2)
++ $ python prog.py 4 -h
++ usage: prog.py [-h] [-v {0,1,2}] square
++
++ positional arguments:
++ square display a square of a given number
++
++ optional arguments:
++ -h, --help show this help message and exit
++ -v {0,1,2}, --verbosity {0,1,2}
++ increase output verbosity
++
++Note that the change is also reflected both in the error message and in the
++help string.
++
++Now, let's use a different approach of playing with verbosity, which is pretty
++common. It also matches the way the CPython executable handles its own
++verbosity argument (check the output of ``python --help``)::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.add_argument("square", type=int,
++ help="display the square of a given number")
++ parser.add_argument("-v", "--verbosity", action="count",
++ help="increase output verbosity")
++ args = parser.parse_args()
++ answer = args.square**2
++ if args.verbosity == 2:
++ print "the square of {} equals {}".format(args.square, answer)
++ elif args.verbosity == 1:
++ print "{}^2 == {}".format(args.square, answer)
++ else:
++ print answer
++
++We have introduced another action, "count",
++to count the number of occurrences of a specific optional argument:
++
++.. code-block:: sh
++
++ $ python prog.py 4
++ 16
++ $ python prog.py 4 -v
++ 4^2 == 16
++ $ python prog.py 4 -vv
++ the square of 4 equals 16
++ $ python prog.py 4 --verbosity --verbosity
++ the square of 4 equals 16
++ $ python prog.py 4 -v 1
++ usage: prog.py [-h] [-v] square
++ prog.py: error: unrecognized arguments: 1
++ $ python prog.py 4 -h
++ usage: prog.py [-h] [-v] square
++
++ positional arguments:
++ square display a square of a given number
++
++ optional arguments:
++ -h, --help show this help message and exit
++ -v, --verbosity increase output verbosity
++ $ python prog.py 4 -vvv
++ 16
++
++* Yes, it's now more of a flag (similar to ``action="store_true"`` in the
++  previous version of our script). That should explain the complaint.
++
++* It also behaves similarly to the "store_true" action.
++
++* Now here's a demonstration of what the "count" action gives. You've probably
++ seen this sort of usage before.
++
++* And, just like the "store_true" action, if you don't specify the ``-v`` flag,
++ that flag is considered to have ``None`` value.
++
++* As should be expected, specifying the long form of the flag gives the same
++  output.
++
++* Sadly, our help output isn't very informative on the new ability our script
++ has acquired, but that can always be fixed by improving the documentation for
++  our script (e.g. via the ``help`` keyword argument).
++
++* That last output exposes a bug in our program.
++
++
++Let's fix::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.add_argument("square", type=int,
++ help="display a square of a given number")
++ parser.add_argument("-v", "--verbosity", action="count",
++ help="increase output verbosity")
++ args = parser.parse_args()
++ answer = args.square**2
++
++ # bugfix: replace == with >=
++ if args.verbosity >= 2:
++ print "the square of {} equals {}".format(args.square, answer)
++ elif args.verbosity >= 1:
++ print "{}^2 == {}".format(args.square, answer)
++ else:
++ print answer
++
++And this is what it gives:
++
++.. code-block:: sh
++
++ $ python prog.py 4 -vvv
++ the square of 4 equals 16
++ $ python prog.py 4 -vvvv
++ the square of 4 equals 16
++ $ python prog.py 4
++ Traceback (most recent call last):
++ File "prog.py", line 11, in <module>
++ if args.verbosity >= 2:
++ TypeError: unorderable types: NoneType() >= int()
++
++* First output went well, and fixes the bug we had before.
++ That is, we want any value >= 2 to be as verbose as possible.
++
++* Third output not so good.
++
++Let's fix that bug::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.add_argument("square", type=int,
++ help="display a square of a given number")
++ parser.add_argument("-v", "--verbosity", action="count", default=0,
++ help="increase output verbosity")
++ args = parser.parse_args()
++ answer = args.square**2
++ if args.verbosity >= 2:
++ print "the square of {} equals {}".format(args.square, answer)
++ elif args.verbosity >= 1:
++ print "{}^2 == {}".format(args.square, answer)
++ else:
++ print answer
++
++We've just introduced yet another keyword, ``default``.
++We've set it to ``0`` in order to make it comparable to the other int values.
++Remember that by default,
++if an optional argument isn't specified,
++it gets the ``None`` value, and that cannot be compared to an int value
++(hence the :exc:`TypeError` exception).
++
++And:
++
++.. code-block:: sh
++
++ $ python prog.py 4
++ 16
++
++You can go quite far just with what we've learned so far,
++and we have only scratched the surface.
++The :mod:`argparse` module is very powerful,
++and we'll explore a bit more of it before we end this tutorial.
++
++
++Getting a little more advanced
++==============================
++
++What if we wanted to expand our tiny program to perform other powers,
++not just squares::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.add_argument("x", type=int, help="the base")
++ parser.add_argument("y", type=int, help="the exponent")
++ parser.add_argument("-v", "--verbosity", action="count", default=0)
++ args = parser.parse_args()
++ answer = args.x**args.y
++ if args.verbosity >= 2:
++ print "{} to the power {} equals {}".format(args.x, args.y, answer)
++ elif args.verbosity >= 1:
++ print "{}^{} == {}".format(args.x, args.y, answer)
++ else:
++ print answer
++
++Output:
++
++.. code-block:: sh
++
++ $ python prog.py
++ usage: prog.py [-h] [-v] x y
++ prog.py: error: the following arguments are required: x, y
++ $ python prog.py -h
++ usage: prog.py [-h] [-v] x y
++
++ positional arguments:
++ x the base
++ y the exponent
++
++ optional arguments:
++ -h, --help show this help message and exit
++ -v, --verbosity
++ $ python prog.py 4 2 -v
++ 4^2 == 16
++
++
++Notice that so far we've been using verbosity level to *change* the text
++that gets displayed. The following example instead uses verbosity level
++to display *more* text::
++
++ import argparse
++ parser = argparse.ArgumentParser()
++ parser.add_argument("x", type=int, help="the base")
++ parser.add_argument("y", type=int, help="the exponent")
++ parser.add_argument("-v", "--verbosity", action="count", default=0)
++ args = parser.parse_args()
++ answer = args.x**args.y
++ if args.verbosity >= 2:
++ print "Running '{}'".format(__file__)
++ if args.verbosity >= 1:
++ print "{}^{} ==".format(args.x, args.y),
++ print answer
++
++Output:
++
++.. code-block:: sh
++
++ $ python prog.py 4 2
++ 16
++ $ python prog.py 4 2 -v
++ 4^2 == 16
++ $ python prog.py 4 2 -vv
++ Running 'prog.py'
++ 4^2 == 16
++
++
++Conflicting options
++-------------------
++
++So far, we have been working with two methods of an
++:class:`argparse.ArgumentParser` instance. Let's introduce a third one,
++:meth:`add_mutually_exclusive_group`. It allows us to specify options that
++conflict with each other. Let's also change the rest of the program so that the
++new functionality makes more sense:
++we'll introduce the ``--quiet`` option,
++which will be the opposite of the ``--verbose`` one::
++
++ import argparse
++
++ parser = argparse.ArgumentParser()
++ group = parser.add_mutually_exclusive_group()
++ group.add_argument("-v", "--verbose", action="store_true")
++ group.add_argument("-q", "--quiet", action="store_true")
++ parser.add_argument("x", type=int, help="the base")
++ parser.add_argument("y", type=int, help="the exponent")
++ args = parser.parse_args()
++ answer = args.x**args.y
++
++ if args.quiet:
++ print answer
++ elif args.verbose:
++ print "{} to the power {} equals {}".format(args.x, args.y, answer)
++ else:
++ print "{}^{} == {}".format(args.x, args.y, answer)
++
++Our program is now simpler, and we've lost some functionality for the sake of
++demonstration. Anyways, here's the output:
++
++.. code-block:: sh
++
++ $ python prog.py 4 2
++ 4^2 == 16
++ $ python prog.py 4 2 -q
++ 16
++ $ python prog.py 4 2 -v
++ 4 to the power 2 equals 16
++ $ python prog.py 4 2 -vq
++ usage: prog.py [-h] [-v | -q] x y
++ prog.py: error: argument -q/--quiet: not allowed with argument -v/--verbose
++ $ python prog.py 4 2 -v --quiet
++ usage: prog.py [-h] [-v | -q] x y
++ prog.py: error: argument -q/--quiet: not allowed with argument -v/--verbose
++
++That should be easy to follow. I've added that last output so you can see the
++sort of flexibility you get, i.e. mixing long form options with short form
++ones.
++
++Before we conclude, you probably want to tell your users the main purpose of
++your program, just in case they don't know::
++
++ import argparse
++
++ parser = argparse.ArgumentParser(description="calculate X to the power of Y")
++ group = parser.add_mutually_exclusive_group()
++ group.add_argument("-v", "--verbose", action="store_true")
++ group.add_argument("-q", "--quiet", action="store_true")
++ parser.add_argument("x", type=int, help="the base")
++ parser.add_argument("y", type=int, help="the exponent")
++ args = parser.parse_args()
++ answer = args.x**args.y
++
++ if args.quiet:
++ print answer
++ elif args.verbose:
++ print "{} to the power {} equals {}".format(args.x, args.y, answer)
++ else:
++ print "{}^{} == {}".format(args.x, args.y, answer)
++
++Note the slight difference in the usage text: the ``[-v | -q]``,
++which tells us that we can either use ``-v`` or ``-q``,
++but not both at the same time:
++
++.. code-block:: sh
++
++ $ python prog.py --help
++ usage: prog.py [-h] [-v | -q] x y
++
++ calculate X to the power of Y
++
++ positional arguments:
++ x the base
++ y the exponent
++
++ optional arguments:
++ -h, --help show this help message and exit
++ -v, --verbose
++ -q, --quiet
++
++
++Conclusion
++==========
++
++The :mod:`argparse` module offers a lot more than shown here.
++Its docs are quite detailed and thorough, and full of examples.
++Having gone through this tutorial, you should easily digest them
++without feeling overwhelmed.
+diff -r 70274d53c1dd Doc/howto/cporting.rst
+--- a/Doc/howto/cporting.rst
++++ b/Doc/howto/cporting.rst
+@@ -2,27 +2,28 @@
+
+ .. _cporting-howto:
+
+-********************************
+-Porting Extension Modules to 3.0
+-********************************
++*************************************
++Porting Extension Modules to Python 3
++*************************************
+
+ :author: Benjamin Peterson
+
+
+ .. topic:: Abstract
+
+- Although changing the C-API was not one of Python 3.0's objectives, the many
+- Python level changes made leaving 2.x's API intact impossible. In fact, some
+- changes such as :func:`int` and :func:`long` unification are more obvious on
+- the C level. This document endeavors to document incompatibilities and how
+- they can be worked around.
++ Although changing the C-API was not one of Python 3's objectives,
++ the many Python-level changes made leaving Python 2's API intact
++ impossible. In fact, some changes such as :func:`int` and
++ :func:`long` unification are more obvious on the C level. This
++ document endeavors to document incompatibilities and how they can
++ be worked around.
+
+
+ Conditional compilation
+ =======================
+
+-The easiest way to compile only some code for 3.0 is to check if
+-:c:macro:`PY_MAJOR_VERSION` is greater than or equal to 3. ::
++The easiest way to compile only some code for Python 3 is to check
++if :c:macro:`PY_MAJOR_VERSION` is greater than or equal to 3. ::
+
+ #if PY_MAJOR_VERSION >= 3
+ #define IS_PY3K
+@@ -35,7 +36,7 @@
+ Changes to Object APIs
+ ======================
+
+-Python 3.0 merged together some types with similar functions while cleanly
++Python 3 merged together some types with similar functions while cleanly
+ separating others.
+
+
+@@ -43,14 +44,14 @@
+ -----------------------
+
+
+-Python 3.0's :func:`str` (``PyString_*`` functions in C) type is equivalent to
+-2.x's :func:`unicode` (``PyUnicode_*``). The old 8-bit string type has become
+-:func:`bytes`. Python 2.6 and later provide a compatibility header,
++Python 3's :func:`str` (``PyString_*`` functions in C) type is equivalent to
++Python 2's :func:`unicode` (``PyUnicode_*``). The old 8-bit string type has
++become :func:`bytes`. Python 2.6 and later provide a compatibility header,
+ :file:`bytesobject.h`, mapping ``PyBytes`` names to ``PyString`` ones. For best
+-compatibility with 3.0, :c:type:`PyUnicode` should be used for textual data and
++compatibility with Python 3, :c:type:`PyUnicode` should be used for textual data and
+ :c:type:`PyBytes` for binary data. It's also important to remember that
+-:c:type:`PyBytes` and :c:type:`PyUnicode` in 3.0 are not interchangeable like
+-:c:type:`PyString` and :c:type:`PyUnicode` are in 2.x. The following example
++:c:type:`PyBytes` and :c:type:`PyUnicode` in Python 3 are not interchangeable like
++:c:type:`PyString` and :c:type:`PyUnicode` are in Python 2. The following example
+ shows best practices with regards to :c:type:`PyUnicode`, :c:type:`PyString`,
+ and :c:type:`PyBytes`. ::
+
+@@ -94,10 +95,12 @@
+ long/int Unification
+ --------------------
+
+-In Python 3.0, there is only one integer type. It is called :func:`int` on the
+-Python level, but actually corresponds to 2.x's :func:`long` type. In the
+-C-API, ``PyInt_*`` functions are replaced by their ``PyLong_*`` neighbors. The
+-best course of action here is using the ``PyInt_*`` functions aliased to
++Python 3 has only one integer type, :func:`int`. But it actually
++corresponds to Python 2's :func:`long` type--the :func:`int` type
++used in Python 2 was removed. In the C-API, ``PyInt_*`` functions
++are replaced by their ``PyLong_*`` equivalents.
++
++The best course of action here is using the ``PyInt_*`` functions aliased to
+ ``PyLong_*`` found in :file:`intobject.h`. The abstract ``PyNumber_*`` APIs
+ can also be used in some cases. ::
+
+@@ -120,10 +123,11 @@
+ Module initialization and state
+ ===============================
+
+-Python 3.0 has a revamped extension module initialization system. (See
+-:pep:`3121`.) Instead of storing module state in globals, they should be stored
+-in an interpreter specific structure. Creating modules that act correctly in
+-both 2.x and 3.0 is tricky. The following simple example demonstrates how. ::
++Python 3 has a revamped extension module initialization system. (See
++:pep:`3121`.) Instead of storing module state in globals, they should
++be stored in an interpreter specific structure. Creating modules that
++act correctly in both Python 2 and Python 3 is tricky. The following
++simple example demonstrates how. ::
+
+ #include "Python.h"
+
+@@ -223,15 +227,18 @@
+ you'll need to switch to Capsules.
+ :c:type:`CObject` was deprecated in 3.1 and 2.7 and completely removed in
+ Python 3.2. If you only support 2.7, or 3.1 and above, you
+-can simply switch to :c:type:`Capsule`. If you need to support 3.0 or
+-versions of Python earlier than 2.7 you'll have to support both CObjects
+-and Capsules.
++can simply switch to :c:type:`Capsule`. If you need to support Python 3.0,
++or versions of Python earlier than 2.7,
++you'll have to support both CObjects and Capsules.
++(Note that Python 3.0 is no longer supported, and it is not recommended
++for production use.)
+
+ The following example header file :file:`capsulethunk.h` may
+-solve the problem for you;
+-simply write your code against the :c:type:`Capsule` API, include
+-this header file after ``"Python.h"``, and you'll automatically use CObjects
+-in Python 3.0 or versions earlier than 2.7.
++solve the problem for you. Simply write your code against the
++:c:type:`Capsule` API and include this header file after
++:file:`Python.h`. Your code will automatically use Capsules
++in versions of Python with Capsules, and switch to CObjects
++when Capsules are unavailable.
+
+ :file:`capsulethunk.h` simulates Capsules using CObjects. However,
+ :c:type:`CObject` provides no place to store the capsule's "name". As a
+@@ -250,12 +257,12 @@
+ returns failure. (Since there's no way to store a name
+ in a CObject, noisy failure of :c:func:`PyCapsule_SetName`
+ was deemed preferable to silent failure here. If this is
+- inconveient, feel free to modify your local
++ inconvenient, feel free to modify your local
+ copy as you see fit.)
+
+ You can find :file:`capsulethunk.h` in the Python source distribution
+-in the :file:`Doc/includes` directory. We also include it here for
+-your reference; here is :file:`capsulethunk.h`:
++as :source:`Doc/includes/capsulethunk.h`. We also include it here for
++your convenience:
+
+ .. literalinclude:: ../includes/capsulethunk.h
+
+@@ -266,5 +273,5 @@
+
+ If you are writing a new extension module, you might consider `Cython
+ <http://www.cython.org>`_. It translates a Python-like language to C. The
+-extension modules it creates are compatible with Python 3.x and 2.x.
++extension modules it creates are compatible with Python 3 and Python 2.
+
+diff -r 70274d53c1dd Doc/howto/curses.rst
+--- a/Doc/howto/curses.rst
++++ b/Doc/howto/curses.rst
+@@ -118,7 +118,7 @@
+ A common problem when debugging a curses application is to get your terminal
+ messed up when the application dies without restoring the terminal to its
+ previous state. In Python this commonly happens when your code is buggy and
+-raises an uncaught exception. Keys are no longer be echoed to the screen when
++raises an uncaught exception. Keys are no longer echoed to the screen when
+ you type them, for example, which makes using the shell difficult.
+
+ In Python you can avoid these complications and make debugging much easier by
+@@ -271,7 +271,7 @@
+ highlight certain words. curses supports this by allowing you to specify an
+ attribute for each cell on the screen.
+
+-An attribute is a integer, each bit representing a different attribute. You can
++An attribute is an integer, each bit representing a different attribute. You can
+ try to display text with multiple attribute bits set, but curses doesn't
+ guarantee that all the possible combinations are available, or that they're all
+ visually distinct. That depends on the ability of the terminal being used, so
+@@ -300,7 +300,7 @@
+ curses.A_REVERSE)
+ stdscr.refresh()
+
+-The curses library also supports color on those terminals that provide it, The
++The curses library also supports color on those terminals that provide it. The
+ most common such terminal is probably the Linux console, followed by color
+ xterms.
+
+diff -r 70274d53c1dd Doc/howto/index.rst
+--- a/Doc/howto/index.rst
++++ b/Doc/howto/index.rst
+@@ -28,4 +28,5 @@
+ unicode.rst
+ urllib2.rst
+ webservers.rst
++ argparse.rst
+
+diff -r 70274d53c1dd Doc/howto/logging-cookbook.rst
+--- a/Doc/howto/logging-cookbook.rst
++++ b/Doc/howto/logging-cookbook.rst
+@@ -295,17 +295,17 @@
+ logger2.warning('Jail zesty vixen who grabbed pay from quack.')
+ logger2.error('The five boxing wizards jump quickly.')
+
+-At the receiving end, you can set up a receiver using the :mod:`socketserver`
++At the receiving end, you can set up a receiver using the :mod:`SocketServer`
+ module. Here is a basic working example::
+
+ import pickle
+ import logging
+ import logging.handlers
+- import socketserver
++ import SocketServer
+ import struct
+
+
+- class LogRecordStreamHandler(socketserver.StreamRequestHandler):
++ class LogRecordStreamHandler(SocketServer.StreamRequestHandler):
+ """Handler for a streaming logging request.
+
+ This basically logs the record using whatever logging policy is
+@@ -347,7 +347,7 @@
+ # cycles and network bandwidth!
+ logger.handle(record)
+
+- class LogRecordSocketReceiver(socketserver.ThreadingTCPServer):
++ class LogRecordSocketReceiver(SocketServer.ThreadingTCPServer):
+ """
+ Simple TCP socket-based logging receiver suitable for testing.
+ """
+@@ -357,7 +357,7 @@
+ def __init__(self, host='localhost',
+ port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
+ handler=LogRecordStreamHandler):
+- socketserver.ThreadingTCPServer.__init__(self, (host, port), handler)
++ SocketServer.ThreadingTCPServer.__init__(self, (host, port), handler)
+ self.abort = 0
+ self.timeout = 1
+ self.logname = None
+@@ -745,3 +745,48 @@
+ For more information about this configuration, you can see the `relevant
+ section <https://docs.djangoproject.com/en/1.3/topics/logging/#configuring-logging>`_
+ of the Django documentation.
++
++Inserting a BOM into messages sent to a SysLogHandler
++-----------------------------------------------------
++
++`RFC 5424 <http://tools.ietf.org/html/rfc5424>`_ requires that a
++Unicode message be sent to a syslog daemon as a set of bytes which have the
++following structure: an optional pure-ASCII component, followed by a UTF-8 Byte
++Order Mark (BOM), followed by Unicode encoded using UTF-8. (See the `relevant
++section of the specification <http://tools.ietf.org/html/rfc5424#section-6>`_.)
++
++In Python 2.6 and 2.7, code was added to
++:class:`~logging.handlers.SysLogHandler` to insert a BOM into the message, but
++unfortunately, it was implemented incorrectly, with the BOM appearing at the
++beginning of the message and hence not allowing any pure-ASCII component to
++appear before it.
++
++As this behaviour is broken, the incorrect BOM insertion code is being removed
++from Python 2.7.4 and later. However, it is not being replaced, and if you
++want to produce RFC 5424-compliant messages which include a BOM, an optional
++pure-ASCII sequence before it and arbitrary Unicode after it, encoded using
++UTF-8, then you need to do the following:
++
++#. Attach a :class:`~logging.Formatter` instance to your
++ :class:`~logging.handlers.SysLogHandler` instance, with a format string
++ such as::
++
++ u'ASCII section\ufeffUnicode section'
++
++   The Unicode code point ``u'\ufeff'``, when encoded using UTF-8, will be
++ encoded as a UTF-8 BOM -- the byte-string ``'\xef\xbb\xbf'``.
++
++#. Replace the ASCII section with whatever placeholders you like, but make sure
++ that the data that appears in there after substitution is always ASCII (that
++ way, it will remain unchanged after UTF-8 encoding).
++
++#. Replace the Unicode section with whatever placeholders you like; if the data
++ which appears there after substitution contains characters outside the ASCII
++ range, that's fine -- it will be encoded using UTF-8.
++
++If the formatted message is Unicode, it *will* be encoded using UTF-8 encoding
++by ``SysLogHandler``. If you follow the above rules, you should be able to
++produce RFC 5424-compliant messages. If you don't, logging may not complain,
++but your messages will not be RFC 5424-compliant, and your syslog daemon may
++complain.
++
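
To make the recipe concrete, here is a rough sketch of the setup described
above; the logger name, syslog address and message text are only examples::

    # -*- coding: utf-8 -*-
    import logging
    import logging.handlers

    logger = logging.getLogger('bom-example')
    handler = logging.handlers.SysLogHandler(address='/dev/log')
    # ASCII-only placeholders before the BOM, arbitrary Unicode after it.
    handler.setFormatter(logging.Formatter(u'%(name)s: \ufeff%(message)s'))
    logger.addHandler(handler)
    logger.error(u'caf\xe9 not found')
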
+diff -r 70274d53c1dd Doc/howto/logging.rst
+--- a/Doc/howto/logging.rst
++++ b/Doc/howto/logging.rst
+@@ -642,6 +642,21 @@
+ code approach, mainly separation of configuration and code and the ability of
+ noncoders to easily modify the logging properties.
+
++.. warning:: The :func:`fileConfig` function takes a default parameter,
++ ``disable_existing_loggers``, which defaults to ``True`` for reasons of
++ backward compatibility. This may or may not be what you want, since it
++ will cause any loggers existing before the :func:`fileConfig` call to
++ be disabled unless they (or an ancestor) are explicitly named in the
++ configuration. Please refer to the reference documentation for more
++ information, and specify ``False`` for this parameter if you wish.
++
++ The dictionary passed to :func:`dictConfig` can also specify a Boolean
++ value with key ``disable_existing_loggers``, which if not specified
++ explicitly in the dictionary also defaults to being interpreted as
++ ``True``. This leads to the logger-disabling behaviour described above,
++ which may not be what you want - in which case, provide the key
++ explicitly with a value of ``False``.
++
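
A minimal sketch of both spellings (the configuration file name and the
dictionary contents are hypothetical)::

    import logging.config

    # File-based configuration, keeping pre-existing loggers enabled.
    logging.config.fileConfig('logging.conf', disable_existing_loggers=False)

    # The dictConfig equivalent carries the flag inside the dictionary itself.
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
    })
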
+ .. currentmodule:: logging
+
+ Note that the class names referenced in config files need to be either relative
+diff -r 70274d53c1dd Doc/howto/pyporting.rst
+--- a/Doc/howto/pyporting.rst
++++ b/Doc/howto/pyporting.rst
+@@ -39,7 +39,7 @@
+ Finally, you do have the option of :ref:`using 2to3 <use_2to3>` to translate
+ Python 2 code into Python 3 code (with some manual help). This can take the
+ form of branching your code and using 2to3 to start a Python 3 branch. You can
+-also have users perform the translation as installation time automatically so
++also have users perform the translation at installation time automatically so
+ that you only have to maintain a Python 2 codebase.
+
+ Regardless of which approach you choose, porting is not as hard or
+@@ -234,7 +234,7 @@
+ ``b'py'[1:2]`` is ``'y'`` in Python 2 and ``b'y'`` in Python 3 (i.e., close
+ enough).
+
+-You cannot concatenate bytes and strings in Python 3. But since in Python
++You cannot concatenate bytes and strings in Python 3. But since Python
+ 2 has bytes aliased to ``str``, it will succeed: ``b'a' + u'b'`` works in
+ Python 2, but ``b'a' + 'b'`` in Python 3 is a :exc:`TypeError`. A similar issue
+ also comes about when doing comparisons between bytes and strings.
+@@ -328,7 +328,7 @@
+ textual data, people have over the years been rather loose in their delineation
+ of what ``str`` instances held text compared to bytes. In Python 3 you cannot
+ be so care-free anymore and need to properly handle the difference. The key
+-handling this issue to make sure that **every** string literal in your
++handling this issue is to make sure that **every** string literal in your
+ Python 2 code is either syntactically of functionally marked as either bytes or
+ text data. After this is done you then need to make sure your APIs are designed
+ to either handle a specific type or made to be properly polymorphic.
+@@ -343,7 +343,7 @@
+ and then designating textual data with a ``u`` prefix or using the
+ ``unicode_literals`` future statement.
+
+-If your project supports versions of Python pre-dating 2.6, then you should use
++If your project supports versions of Python predating 2.6, then you should use
+ the six_ project and its ``b()`` function to denote bytes literals. For text
+ literals you can either use six's ``u()`` function or use a ``u`` prefix.
+
+@@ -439,7 +439,7 @@
+ There are two ways to solve this issue. One is to use a custom 2to3 fixer. The
+ blog post at http://lucumr.pocoo.org/2011/1/22/forwards-compatible-python/
+ specifies how to do this. That will allow 2to3 to change all instances of ``def
+-__unicode(self): ...`` to ``def __str__(self): ...``. This does require you
++__unicode(self): ...`` to ``def __str__(self): ...``. This does require that you
+ define your ``__str__()`` method in Python 2 before your ``__unicode__()``
+ method.
+
+diff -r 70274d53c1dd Doc/howto/regex.rst
+--- a/Doc/howto/regex.rst
++++ b/Doc/howto/regex.rst
+@@ -365,7 +365,7 @@
+
+ You can learn about this by interactively experimenting with the :mod:`re`
+ module. If you have Tkinter available, you may also want to look at
+-:file:`Tools/scripts/redemo.py`, a demonstration program included with the
++:source:`Tools/scripts/redemo.py`, a demonstration program included with the
+ Python distribution. It allows you to enter REs and strings, and displays
+ whether the RE matches or fails. :file:`redemo.py` can be quite useful when
+ trying to debug a complicated RE. Phil Schwartz's `Kodos
+@@ -501,7 +501,7 @@
+ the same ones in several locations, then it might be worthwhile to collect all
+ the definitions in one place, in a section of code that compiles all the REs
+ ahead of time. To take an example from the standard library, here's an extract
+-from :file:`xmllib.py`::
++from the deprecated :mod:`xmllib` module::
+
+ ref = re.compile( ... )
+ entityref = re.compile( ... )
+diff -r 70274d53c1dd Doc/howto/sockets.rst
+--- a/Doc/howto/sockets.rst
++++ b/Doc/howto/sockets.rst
+@@ -156,7 +156,7 @@
+ there, you may wait forever for the reply, because the request may still be in
+ your output buffer.
+
+-Now we come the major stumbling block of sockets - ``send`` and ``recv`` operate
++Now we come to the major stumbling block of sockets - ``send`` and ``recv`` operate
+ on the network buffers. They do not necessarily handle all the bytes you hand
+ them (or expect from them), because their major focus is handling the network
+ buffers. In general, they return when the associated network buffers have been
+@@ -167,7 +167,7 @@
+ When a ``recv`` returns 0 bytes, it means the other side has closed (or is in
+ the process of closing) the connection. You will not receive any more data on
+ this connection. Ever. You may be able to send data successfully; I'll talk
+-about that some on the next page.
++more about this later.
+
+ A protocol like HTTP uses a socket for only one transfer. The client sends a
+ request, then reads a reply. That's it. The socket is discarded. This means that
+diff -r 70274d53c1dd Doc/howto/sorting.rst
+--- a/Doc/howto/sorting.rst
++++ b/Doc/howto/sorting.rst
+@@ -124,7 +124,7 @@
+ ========================
+
+ Both :meth:`list.sort` and :func:`sorted` accept a *reverse* parameter with a
+-boolean value. This is using to flag descending sorts. For example, to get the
++boolean value. This is used to flag descending sorts. For example, to get the
+ student data in reverse *age* order:
+
+ >>> sorted(student_tuples, key=itemgetter(2), reverse=True)
+@@ -210,11 +210,11 @@
+ arguments. Instead, all of the Py2.x versions supported a *cmp* parameter to
+ handle user specified comparison functions.
+
+-In Py3.0, the *cmp* parameter was removed entirely (as part of a larger effort to
++In Python 3, the *cmp* parameter was removed entirely (as part of a larger effort to
+ simplify and unify the language, eliminating the conflict between rich
+ comparisons and the :meth:`__cmp__` magic method).
+
+-In Py2.x, sort allowed an optional function which can be called for doing the
++In Python 2, :meth:`~list.sort` allowed an optional function which can be called for doing the
+ comparisons. That function should take two arguments to be compared and then
+ return a negative value for less-than, return zero if they are equal, or return
+ a positive value for greater-than. For example, we can do:
+diff -r 70274d53c1dd Doc/howto/urllib2.rst
+--- a/Doc/howto/urllib2.rst
++++ b/Doc/howto/urllib2.rst
+@@ -439,12 +439,12 @@
+
+ When authentication is required, the server sends a header (as well as the 401
+ error code) requesting authentication. This specifies the authentication scheme
+-and a 'realm'. The header looks like : ``Www-authenticate: SCHEME
++and a 'realm'. The header looks like : ``WWW-Authenticate: SCHEME
+ realm="REALM"``.
+
+ e.g. ::
+
+- Www-authenticate: Basic realm="cPanel Users"
++ WWW-Authenticate: Basic realm="cPanel Users"
+
+
+ The client should then retry the request with the appropriate name and password
+diff -r 70274d53c1dd Doc/includes/sqlite3/execute_1.py
+--- a/Doc/includes/sqlite3/execute_1.py
++++ b/Doc/includes/sqlite3/execute_1.py
+@@ -1,11 +1,16 @@
+ import sqlite3
+
+-con = sqlite3.connect("mydb")
+-
++con = sqlite3.connect(":memory:")
+ cur = con.cursor()
++cur.execute("create table people (name_last, age)")
+
+ who = "Yeltsin"
+ age = 72
+
+-cur.execute("select name_last, age from people where name_last=? and age=?", (who, age))
++# This is the qmark style:
++cur.execute("insert into people values (?, ?)", (who, age))
++
++# And this is the named style:
++cur.execute("select * from people where name_last=:who and age=:age", {"who": who, "age": age})
++
+ print cur.fetchone()
+diff -r 70274d53c1dd Doc/includes/sqlite3/execute_2.py
+--- a/Doc/includes/sqlite3/execute_2.py
++++ /dev/null
+@@ -1,12 +0,0 @@
+-import sqlite3
+-
+-con = sqlite3.connect("mydb")
+-
+-cur = con.cursor()
+-
+-who = "Yeltsin"
+-age = 72
+-
+-cur.execute("select name_last, age from people where name_last=:who and age=:age",
+- {"who": who, "age": age})
+-print cur.fetchone()
+diff -r 70274d53c1dd Doc/includes/sqlite3/executemany_2.py
+--- a/Doc/includes/sqlite3/executemany_2.py
++++ b/Doc/includes/sqlite3/executemany_2.py
+@@ -1,8 +1,8 @@
+ import sqlite3
++import string
+
+ def char_generator():
+- import string
+- for c in string.letters[:26]:
++ for c in string.lowercase:
+ yield (c,)
+
+ con = sqlite3.connect(":memory:")
+diff -r 70274d53c1dd Doc/includes/sqlite3/rowclass.py
+--- a/Doc/includes/sqlite3/rowclass.py
++++ b/Doc/includes/sqlite3/rowclass.py
+@@ -1,12 +1,12 @@
+ import sqlite3
+
+-con = sqlite3.connect("mydb")
++con = sqlite3.connect(":memory:")
+ con.row_factory = sqlite3.Row
+
+ cur = con.cursor()
+-cur.execute("select name_last, age from people")
++cur.execute("select 'John' as name, 42 as age")
+ for row in cur:
+- assert row[0] == row["name_last"]
+- assert row["name_last"] == row["nAmE_lAsT"]
++ assert row[0] == row["name"]
++ assert row["name"] == row["nAmE"]
+ assert row[1] == row["age"]
+ assert row[1] == row["AgE"]
+diff -r 70274d53c1dd Doc/includes/sqlite3/text_factory.py
+--- a/Doc/includes/sqlite3/text_factory.py
++++ b/Doc/includes/sqlite3/text_factory.py
+@@ -3,9 +3,6 @@
+ con = sqlite3.connect(":memory:")
+ cur = con.cursor()
+
+-# Create the table
+-con.execute("create table person(lastname, firstname)")
+-
+ AUSTRIA = u"\xd6sterreich"
+
+ # by default, rows are returned as Unicode
+@@ -17,7 +14,7 @@
+ con.text_factory = str
+ cur.execute("select ?", (AUSTRIA,))
+ row = cur.fetchone()
+-assert type(row[0]) == str
++assert type(row[0]) is str
+ # the bytestrings will be encoded in UTF-8, unless you stored garbage in the
+ # database ...
+ assert row[0] == AUSTRIA.encode("utf-8")
+@@ -29,15 +26,15 @@
+ cur.execute("select ?", ("this is latin1 and would normally create errors" +
+ u"\xe4\xf6\xfc".encode("latin1"),))
+ row = cur.fetchone()
+-assert type(row[0]) == unicode
++assert type(row[0]) is unicode
+
+ # sqlite3 offers a built-in optimized text_factory that will return bytestring
+ # objects, if the data is in ASCII only, and otherwise return unicode objects
+ con.text_factory = sqlite3.OptimizedUnicode
+ cur.execute("select ?", (AUSTRIA,))
+ row = cur.fetchone()
+-assert type(row[0]) == unicode
++assert type(row[0]) is unicode
+
+ cur.execute("select ?", ("Germany",))
+ row = cur.fetchone()
+-assert type(row[0]) == str
++assert type(row[0]) is str
+diff -r 70274d53c1dd Doc/library/2to3.rst
+--- a/Doc/library/2to3.rst
++++ b/Doc/library/2to3.rst
+@@ -314,7 +314,7 @@
+
+ Converts ``raise E, V`` to ``raise E(V)``, and ``raise E, V, T`` to ``raise
+ E(V).with_traceback(T)``. If ``E`` is a tuple, the translation will be
+- incorrect because substituting tuples for exceptions has been removed in 3.0.
++ incorrect because substituting tuples for exceptions has been removed in Python 3.
+
+ .. 2to3fixer:: raw_input
+
+diff -r 70274d53c1dd Doc/library/__future__.rst
+--- a/Doc/library/__future__.rst
++++ b/Doc/library/__future__.rst
+@@ -75,7 +75,7 @@
+ | division | 2.2.0a2 | 3.0 | :pep:`238`: |
+ | | | | *Changing the Division Operator* |
+ +------------------+-------------+--------------+---------------------------------------------+
+-| absolute_import | 2.5.0a1 | 2.7 | :pep:`328`: |
++| absolute_import | 2.5.0a1 | 3.0 | :pep:`328`: |
+ | | | | *Imports: Multi-Line and Absolute/Relative* |
+ +------------------+-------------+--------------+---------------------------------------------+
+ | with_statement | 2.5.0a1 | 2.6 | :pep:`343`: |
+diff -r 70274d53c1dd Doc/library/_winreg.rst
+--- a/Doc/library/_winreg.rst
++++ b/Doc/library/_winreg.rst
+@@ -7,9 +7,9 @@
+ .. sectionauthor:: Mark Hammond <MarkH@ActiveState.com>
+
+ .. note::
+- The :mod:`_winreg` module has been renamed to :mod:`winreg` in Python 3.0.
++ The :mod:`_winreg` module has been renamed to :mod:`winreg` in Python 3.
+ The :term:`2to3` tool will automatically adapt imports when converting your
+- sources to 3.0.
++ sources to Python 3.
+
+
+ .. versionadded:: 2.0
+diff -r 70274d53c1dd Doc/library/al.rst
+--- a/Doc/library/al.rst
++++ b/Doc/library/al.rst
+@@ -8,7 +8,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`al` module has been deprecated for removal in Python 3.0.
++ The :mod:`al` module has been removed in Python 3.
+
+
+ This module provides access to the audio facilities of the SGI Indy and Indigo
+@@ -201,7 +201,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`AL` module has been deprecated for removal in Python 3.0.
++ The :mod:`AL` module has been removed in Python 3.
+
+
+ This module defines symbolic constants needed to use the built-in module
+diff -r 70274d53c1dd Doc/library/anydbm.rst
+--- a/Doc/library/anydbm.rst
++++ b/Doc/library/anydbm.rst
+@@ -6,9 +6,9 @@
+
+
+ .. note::
+- The :mod:`anydbm` module has been renamed to :mod:`dbm` in Python 3.0. The
++ The :mod:`anydbm` module has been renamed to :mod:`dbm` in Python 3. The
+ :term:`2to3` tool will automatically adapt imports when converting your
+- sources to 3.0.
++ sources to Python 3.
+
+ .. index::
+ module: dbhash
+diff -r 70274d53c1dd Doc/library/archiving.rst
+--- a/Doc/library/archiving.rst
++++ b/Doc/library/archiving.rst
+@@ -7,6 +7,7 @@
+
+ The modules described in this chapter support data compression with the zlib,
+ gzip, and bzip2 algorithms, and the creation of ZIP- and tar-format archives.
++See also :ref:`archiving-operations` provided by the :mod:`shutil` module.
+
+
+ .. toctree::
+diff -r 70274d53c1dd Doc/library/argparse.rst
+--- a/Doc/library/argparse.rst
++++ b/Doc/library/argparse.rst
+@@ -12,6 +12,12 @@
+
+ --------------
+
++.. sidebar:: Tutorial
++
++ This page contains the API reference information. For a more gentle
++ introduction to Python command-line parsing, have a look at the
++ :ref:`argparse tutorial <argparse-tutorial>`.
++
+ The :mod:`argparse` module makes it easy to write user-friendly command-line
+ interfaces. The program defines what arguments it requires, and :mod:`argparse`
+ will figure out how to parse those out of :data:`sys.argv`. The :mod:`argparse`
+@@ -743,7 +749,7 @@
+
+ * ``values`` - The associated command-line arguments, with any type conversions
+ applied. (Type conversions are specified with the type_ keyword argument to
+- :meth:`~ArgumentParser.add_argument`.
++ :meth:`~ArgumentParser.add_argument`.)
+
+ * ``option_string`` - The option string that was used to invoke this action.
+ The ``option_string`` argument is optional, and will be absent if the action
+@@ -1634,8 +1640,8 @@
+
+ --bar BAR bar help
+
+- Note that any arguments not your user defined groups will end up back in the
+- usual "positional arguments" and "optional arguments" sections.
++ Note that any arguments not in your user-defined groups will end up back
++ in the usual "positional arguments" and "optional arguments" sections.
+
+
+ Mutual exclusion
+@@ -1826,9 +1832,10 @@
+ * Replace all :meth:`optparse.OptionParser.add_option` calls with
+ :meth:`ArgumentParser.add_argument` calls.
+
+-* Replace ``options, args = parser.parse_args()`` with ``args =
++* Replace ``(options, args) = parser.parse_args()`` with ``args =
+ parser.parse_args()`` and add additional :meth:`ArgumentParser.add_argument`
+- calls for the positional arguments.
++ calls for the positional arguments. Keep in mind that what was previously
++ called ``options``, now in :mod:`argparse` context is called ``args``.
+
+ * Replace callback actions and the ``callback_*`` keyword arguments with
+ ``type`` or ``action`` arguments.
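
A minimal sketch of the migration steps listed above (the option and file names are placeholders only): positionals become ordinary :meth:`add_argument` calls, and ``parse_args`` returns a single namespace instead of an ``(options, args)`` pair::

    import argparse

    parser = argparse.ArgumentParser(description="example converter")
    parser.add_argument("infile", help="was a bare positional under optparse")
    parser.add_argument("--verbose", action="store_true", help="chatty output")

    # one Namespace object, no separate args list
    args = parser.parse_args(["data.txt", "--verbose"])
    print args.infile, args.verbose  # data.txt True
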
+diff -r 70274d53c1dd Doc/library/basehttpserver.rst
+--- a/Doc/library/basehttpserver.rst
++++ b/Doc/library/basehttpserver.rst
+@@ -6,8 +6,8 @@
+
+ .. note::
+ The :mod:`BaseHTTPServer` module has been merged into :mod:`http.server` in
+- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
+- converting your sources to 3.0.
++ Python 3. The :term:`2to3` tool will automatically adapt imports when
++ converting your sources to Python 3.
+
+
+ .. index::
+@@ -240,7 +240,7 @@
+ to create custom error logging mechanisms. The *format* argument is a
+ standard printf-style format string, where the additional arguments to
+ :meth:`log_message` are applied as inputs to the formatting. The client
+- address and current date and time are prefixed to every message logged.
++ ip address and current date and time are prefixed to every message logged.
+
+
+ .. method:: version_string()
+diff -r 70274d53c1dd Doc/library/bastion.rst
+--- a/Doc/library/bastion.rst
++++ b/Doc/library/bastion.rst
+@@ -7,7 +7,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`Bastion` module has been removed in Python 3.0.
++ The :mod:`Bastion` module has been removed in Python 3.
+
+ .. moduleauthor:: Barry Warsaw <bwarsaw@python.org>
+
+diff -r 70274d53c1dd Doc/library/bdb.rst
+--- a/Doc/library/bdb.rst
++++ b/Doc/library/bdb.rst
+@@ -20,7 +20,7 @@
+
+ The :mod:`bdb` module also defines two classes:
+
+-.. class:: Breakpoint(self, file, line[, temporary=0[, cond=None [, funcname=None]]])
++.. class:: Breakpoint(self, file, line, temporary=0, cond=None, funcname=None)
+
+ This class implements temporary breakpoints, ignore counts, disabling and
+ (re-)enabling, and conditionals.
+@@ -245,7 +245,7 @@
+ breakpoints. These methods return a string containing an error message if
+ something went wrong, or ``None`` if all is well.
+
+- .. method:: set_break(filename, lineno[, temporary=0[, cond[, funcname]]])
++ .. method:: set_break(filename, lineno, temporary=0, cond=None, funcname=None)
+
+ Set a new breakpoint. If the *lineno* line doesn't exist for the
+ *filename* passed as argument, return an error message. The *filename*
+diff -r 70274d53c1dd Doc/library/binascii.rst
+--- a/Doc/library/binascii.rst
++++ b/Doc/library/binascii.rst
+@@ -127,7 +127,7 @@
+ The return value is in the range [-2**31, 2**31-1]
+ regardless of platform. In the past the value would be signed on
+ some platforms and unsigned on others. Use & 0xffffffff on the
+- value if you want it to match 3.0 behavior.
++ value if you want it to match Python 3 behavior.
+
+ .. versionchanged:: 3.0
+ The return value is unsigned and in the range [0, 2**32-1]
+diff -r 70274d53c1dd Doc/library/bisect.rst
+--- a/Doc/library/bisect.rst
++++ b/Doc/library/bisect.rst
+@@ -123,9 +123,9 @@
+ a 'B', and so on::
+
+ >>> def grade(score, breakpoints=[60, 70, 80, 90], grades='FDCBA'):
+- ... i = bisect(breakpoints, score)
+- ... return grades[i]
+- ...
++ i = bisect(breakpoints, score)
++ return grades[i]
++
+ >>> [grade(score) for score in [33, 99, 77, 70, 89, 90, 100]]
+ ['F', 'A', 'C', 'C', 'B', 'A', 'A']
+
+diff -r 70274d53c1dd Doc/library/bsddb.rst
+--- a/Doc/library/bsddb.rst
++++ b/Doc/library/bsddb.rst
+@@ -7,7 +7,7 @@
+ .. sectionauthor:: Skip Montanaro <skip@pobox.com>
+
+ .. deprecated:: 2.6
+- The :mod:`bsddb` module has been deprecated for removal in Python 3.0.
++ The :mod:`bsddb` module has been removed in Python 3.
+
+
+ The :mod:`bsddb` module provides an interface to the Berkeley DB library. Users
+@@ -86,7 +86,7 @@
+ This is present *only* to allow backwards compatibility with systems which ship
+ with the old Berkeley DB 1.85 database library. The :mod:`bsddb185` module
+ should never be used directly in new code. The module has been removed in
+- Python 3.0. If you find you still need it look in PyPI.
++ Python 3. If you find you still need it look in PyPI.
+
+
+ .. seealso::
+diff -r 70274d53c1dd Doc/library/bz2.rst
+--- a/Doc/library/bz2.rst
++++ b/Doc/library/bz2.rst
+@@ -14,9 +14,6 @@
+ It implements a complete file interface, one-shot (de)compression functions, and
+ types for sequential (de)compression.
+
+-For other archive formats, see the :mod:`gzip`, :mod:`zipfile`, and
+-:mod:`tarfile` modules.
+-
+ Here is a summary of the features offered by the bz2 module:
+
+ * :class:`BZ2File` class implements a complete file interface, including
+diff -r 70274d53c1dd Doc/library/carbon.rst
+--- a/Doc/library/carbon.rst
++++ b/Doc/library/carbon.rst
+@@ -24,7 +24,7 @@
+
+ .. note::
+
+- The Carbon modules have been removed in Python 3.0.
++ The Carbon modules have been removed in Python 3.
+
+
+ :mod:`Carbon.AE` --- Apple Events
+diff -r 70274d53c1dd Doc/library/cd.rst
+--- a/Doc/library/cd.rst
++++ b/Doc/library/cd.rst
+@@ -9,7 +9,7 @@
+
+
+ .. deprecated:: 2.6
+- The :mod:`cd` module has been deprecated for removal in Python 3.0.
++ The :mod:`cd` module has been removed in Python 3.
+
+
+ This module provides an interface to the Silicon Graphics CD library. It is
+diff -r 70274d53c1dd Doc/library/cgi.rst
+--- a/Doc/library/cgi.rst
++++ b/Doc/library/cgi.rst
+@@ -284,10 +284,10 @@
+ algorithms implemented in this module in other circumstances.
+
+
+-.. function:: parse(fp[, keep_blank_values[, strict_parsing]])
++.. function:: parse(fp[, environ[, keep_blank_values[, strict_parsing]]])
+
+ Parse a query in the environment or from a file (the file defaults to
+- ``sys.stdin``). The *keep_blank_values* and *strict_parsing* parameters are
++ ``sys.stdin`` and environment defaults to ``os.environ``). The *keep_blank_values* and *strict_parsing* parameters are
+ passed to :func:`urlparse.parse_qs` unchanged.
+
+
+diff -r 70274d53c1dd Doc/library/cgihttpserver.rst
+--- a/Doc/library/cgihttpserver.rst
++++ b/Doc/library/cgihttpserver.rst
+@@ -8,8 +8,8 @@
+
+ .. note::
+ The :mod:`CGIHTTPServer` module has been merged into :mod:`http.server` in
+- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
+- converting your sources to 3.0.
++ Python 3. The :term:`2to3` tool will automatically adapt imports when
++ converting your sources to Python 3.
+
+
+ The :mod:`CGIHTTPServer` module defines a request-handler class, interface
+diff -r 70274d53c1dd Doc/library/collections.rst
+--- a/Doc/library/collections.rst
++++ b/Doc/library/collections.rst
+@@ -601,47 +601,49 @@
+
+ >>> Point = namedtuple('Point', ['x', 'y'], verbose=True)
+ class Point(tuple):
+- 'Point(x, y)'
++ 'Point(x, y)'
+ <BLANKLINE>
+- __slots__ = ()
++ __slots__ = ()
+ <BLANKLINE>
+- _fields = ('x', 'y')
++ _fields = ('x', 'y')
+ <BLANKLINE>
+- def __new__(_cls, x, y):
+- 'Create a new instance of Point(x, y)'
+- return _tuple.__new__(_cls, (x, y))
++ def __new__(_cls, x, y):
++ 'Create a new instance of Point(x, y)'
++ return _tuple.__new__(_cls, (x, y))
+ <BLANKLINE>
+- @classmethod
+- def _make(cls, iterable, new=tuple.__new__, len=len):
+- 'Make a new Point object from a sequence or iterable'
+- result = new(cls, iterable)
+- if len(result) != 2:
+- raise TypeError('Expected 2 arguments, got %d' % len(result))
+- return result
++ @classmethod
++ def _make(cls, iterable, new=tuple.__new__, len=len):
++ 'Make a new Point object from a sequence or iterable'
++ result = new(cls, iterable)
++ if len(result) != 2:
++ raise TypeError('Expected 2 arguments, got %d' % len(result))
++ return result
+ <BLANKLINE>
+- def __repr__(self):
+- 'Return a nicely formatted representation string'
+- return 'Point(x=%r, y=%r)' % self
++ def __repr__(self):
++ 'Return a nicely formatted representation string'
++ return 'Point(x=%r, y=%r)' % self
+ <BLANKLINE>
+- def _asdict(self):
+- 'Return a new OrderedDict which maps field names to their values'
+- return OrderedDict(zip(self._fields, self))
++ def _asdict(self):
++ 'Return a new OrderedDict which maps field names to their values'
++ return OrderedDict(zip(self._fields, self))
+ <BLANKLINE>
+- __dict__ = property(_asdict)
++ __dict__ = property(_asdict)
+ <BLANKLINE>
+- def _replace(_self, **kwds):
+- 'Return a new Point object replacing specified fields with new values'
+- result = _self._make(map(kwds.pop, ('x', 'y'), _self))
+- if kwds:
+- raise ValueError('Got unexpected field names: %r' % kwds.keys())
+- return result
++ def _replace(_self, **kwds):
++ 'Return a new Point object replacing specified fields with new values'
++ result = _self._make(map(kwds.pop, ('x', 'y'), _self))
++ if kwds:
++ raise ValueError('Got unexpected field names: %r' % kwds.keys())
++ return result
+ <BLANKLINE>
+- def __getnewargs__(self):
+- 'Return self as a plain tuple. Used by copy and pickle.'
+- return tuple(self)
++ def __getnewargs__(self):
++ 'Return self as a plain tuple. Used by copy and pickle.'
++ return tuple(self)
+ <BLANKLINE>
+- x = _property(_itemgetter(0), doc='Alias for field number 0')
+- y = _property(_itemgetter(1), doc='Alias for field number 1')
++ x = _property(_itemgetter(0), doc='Alias for field number 0')
++ <BLANKLINE>
++ y = _property(_itemgetter(1), doc='Alias for field number 1')
++ <BLANKLINE>
+
+ >>> p = Point(11, y=22) # instantiate with positional or keyword arguments
+ >>> p[0] + p[1] # indexable like the plain tuple (11, 22)
+diff -r 70274d53c1dd Doc/library/commands.rst
+--- a/Doc/library/commands.rst
++++ b/Doc/library/commands.rst
+@@ -8,7 +8,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`commands` module has been removed in Python 3.0. Use the
++ The :mod:`commands` module has been removed in Python 3. Use the
+ :mod:`subprocess` module instead.
+
+ .. sectionauthor:: Sue Williams <sbw@provis.com>
+diff -r 70274d53c1dd Doc/library/compiler.rst
+--- a/Doc/library/compiler.rst
++++ b/Doc/library/compiler.rst
+@@ -6,7 +6,7 @@
+ ***********************
+
+ .. deprecated:: 2.6
+- The :mod:`compiler` package has been removed in Python 3.0.
++ The :mod:`compiler` package has been removed in Python 3.
+
+ .. sectionauthor:: Jeremy Hylton <jeremy@zope.com>
+
+diff -r 70274d53c1dd Doc/library/configparser.rst
+--- a/Doc/library/configparser.rst
++++ b/Doc/library/configparser.rst
+@@ -12,8 +12,8 @@
+ .. note::
+
+ The :mod:`ConfigParser` module has been renamed to :mod:`configparser` in
+- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
+- converting your sources to 3.0.
++ Python 3. The :term:`2to3` tool will automatically adapt imports when
++ converting your sources to Python 3.
+
+ .. index::
+ pair: .ini; file
+diff -r 70274d53c1dd Doc/library/cookie.rst
+--- a/Doc/library/cookie.rst
++++ b/Doc/library/cookie.rst
+@@ -8,8 +8,8 @@
+
+ .. note::
+ The :mod:`Cookie` module has been renamed to :mod:`http.cookies` in Python
+- 3.0. The :term:`2to3` tool will automatically adapt imports when converting
+- your sources to 3.0.
++ 3. The :term:`2to3` tool will automatically adapt imports when converting
++ your sources to Python 3.
+
+ **Source code:** :source:`Lib/Cookie.py`
+
+@@ -22,8 +22,14 @@
+
+ The module formerly strictly applied the parsing rules described in the
+ :rfc:`2109` and :rfc:`2068` specifications. It has since been discovered that
+-MSIE 3.0x doesn't follow the character rules outlined in those specs. As a
+-result, the parsing rules used are a bit less strict.
++MSIE 3.0x doesn't follow the character rules outlined in those specs and also
++many current day browsers and servers have relaxed parsing rules when it comes to
++Cookie handling. As a result, the parsing rules used are a bit less strict.
++
++The character set, :data:`string.ascii_letters`, :data:`string.digits` and
++``!#$%&'*+-.^_`|~`` denote the set of valid characters allowed by this module
++in Cookie name (as :attr:`~Morsel.key`).
++
+
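
As a small illustrative sketch of the character rules described above (the cookie name and value are placeholders), a name built from the allowed characters round-trips cleanly::

    import Cookie

    c = Cookie.SimpleCookie()
    c["session_id"] = "12345"      # name drawn from the allowed character set
    c["session_id"]["path"] = "/"
    print c.output()               # Set-Cookie: session_id=12345; Path=/
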
+ .. note::
+
+diff -r 70274d53c1dd Doc/library/cookielib.rst
+--- a/Doc/library/cookielib.rst
++++ b/Doc/library/cookielib.rst
+@@ -8,8 +8,8 @@
+
+ .. note::
+ The :mod:`cookielib` module has been renamed to :mod:`http.cookiejar` in
+- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
+- converting your sources to 3.0.
++ Python 3. The :term:`2to3` tool will automatically adapt imports when
++ converting your sources to Python 3.
+
+ .. versionadded:: 2.4
+
+diff -r 70274d53c1dd Doc/library/copy_reg.rst
+--- a/Doc/library/copy_reg.rst
++++ b/Doc/library/copy_reg.rst
+@@ -5,9 +5,9 @@
+ :synopsis: Register pickle support functions.
+
+ .. note::
+- The :mod:`copy_reg` module has been renamed to :mod:`copyreg` in Python 3.0.
++ The :mod:`copy_reg` module has been renamed to :mod:`copyreg` in Python 3.
+ The :term:`2to3` tool will automatically adapt imports when converting your
+- sources to 3.0.
++ sources to Python 3.
+
+ .. index::
+ module: pickle
+diff -r 70274d53c1dd Doc/library/csv.rst
+--- a/Doc/library/csv.rst
++++ b/Doc/library/csv.rst
+@@ -40,7 +40,7 @@
+ This version of the :mod:`csv` module doesn't support Unicode input. Also,
+ there are currently some issues regarding ASCII NUL characters. Accordingly,
+ all input should be UTF-8 or printable ASCII to be safe; see the examples in
+- section :ref:`csv-examples`. These restrictions will be removed in the future.
++ section :ref:`csv-examples`.
+
+
+ .. seealso::
+@@ -162,7 +162,7 @@
+ The :mod:`csv` module defines the following classes:
+
+
+-.. class:: DictReader(csvfile[, fieldnames=None[, restkey=None[, restval=None[, dialect='excel'[, *args, **kwds]]]]])
++.. class:: DictReader(csvfile, fieldnames=None, restkey=None, restval=None, dialect='excel', *args, **kwds)
+
+ Create an object which operates like a regular reader but maps the information
+ read into a dict whose keys are given by the optional *fieldnames* parameter.
+@@ -175,7 +175,7 @@
+ the underlying :class:`reader` instance.
+
+
+-.. class:: DictWriter(csvfile, fieldnames[, restval=''[, extrasaction='raise'[, dialect='excel'[, *args, **kwds]]]])
++.. class:: DictWriter(csvfile, fieldnames, restval='', extrasaction='raise', dialect='excel', *args, **kwds)
+
+ Create an object which operates like a regular writer but maps dictionaries onto
+ output rows. The *fieldnames* parameter identifies the order in which values in
+@@ -219,7 +219,7 @@
+
+ The :class:`Sniffer` class provides two methods:
+
+- .. method:: sniff(sample[, delimiters=None])
++ .. method:: sniff(sample, delimiters=None)
+
+ Analyze the given *sample* and return a :class:`Dialect` subclass
+ reflecting the parameters found. If the optional *delimiters* parameter
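
A brief sketch of :class:`DictReader` with the defaults shown above (the sample data is made up); each row comes back as a dict keyed by the header line::

    import csv
    from StringIO import StringIO

    sample = StringIO("name_last,age\nYeltsin,72\nPutin,60\n")
    for row in csv.DictReader(sample):
        print row["name_last"], row["age"]
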
+diff -r 70274d53c1dd Doc/library/datetime.rst
+--- a/Doc/library/datetime.rst
++++ b/Doc/library/datetime.rst
+@@ -14,27 +14,34 @@
+ The :mod:`datetime` module supplies classes for manipulating dates and times in
+ both simple and complex ways. While date and time arithmetic is supported, the
+ focus of the implementation is on efficient attribute extraction for output
+-formatting and manipulation. For related
+-functionality, see also the :mod:`time` and :mod:`calendar` modules.
++formatting and manipulation. For related functionality, see also the
++:mod:`time` and :mod:`calendar` modules.
+
+-There are two kinds of date and time objects: "naive" and "aware". This
+-distinction refers to whether the object has any notion of time zone, daylight
+-saving time, or other kind of algorithmic or political time adjustment. Whether
+-a naive :class:`.datetime` object represents Coordinated Universal Time (UTC),
+-local time, or time in some other timezone is purely up to the program, just
+-like it's up to the program whether a particular number represents metres,
+-miles, or mass. Naive :class:`.datetime` objects are easy to understand and to
+-work with, at the cost of ignoring some aspects of reality.
++There are two kinds of date and time objects: "naive" and "aware".
+
+-For applications requiring more, :class:`.datetime` and :class:`.time` objects
+-have an optional time zone information attribute, :attr:`tzinfo`, that can be
+-set to an instance of a subclass of the abstract :class:`tzinfo` class. These
+-:class:`tzinfo` objects capture information about the offset from UTC time, the
+-time zone name, and whether Daylight Saving Time is in effect. Note that no
+-concrete :class:`tzinfo` classes are supplied by the :mod:`datetime` module.
+-Supporting timezones at whatever level of detail is required is up to the
+-application. The rules for time adjustment across the world are more political
+-than rational, and there is no standard suitable for every application.
++An aware object has sufficient knowledge of applicable algorithmic and
++political time adjustments, such as time zone and daylight saving time
++information, to locate itself relative to other aware objects. An aware object
++is used to represent a specific moment in time that is not open to
++interpretation [#]_.
++
++A naive object does not contain enough information to unambiguously locate
++itself relative to other date/time objects. Whether a naive object represents
++Coordinated Universal Time (UTC), local time, or time in some other timezone is
++purely up to the program, just like it's up to the program whether a particular
++number represents metres, miles, or mass. Naive objects are easy to understand
++and to work with, at the cost of ignoring some aspects of reality.
++
++For applications requiring aware objects, :class:`.datetime` and :class:`.time`
++objects have an optional time zone information attribute, :attr:`tzinfo`, that
++can be set to an instance of a subclass of the abstract :class:`tzinfo` class.
++These :class:`tzinfo` objects capture information about the offset from UTC
++time, the time zone name, and whether Daylight Saving Time is in effect. Note
++that no concrete :class:`tzinfo` classes are supplied by the :mod:`datetime`
++module. Supporting timezones at whatever level of detail is required is up to
++the application. The rules for time adjustment across the world are more
++political than rational, and there is no standard suitable for every
++application.
+
+ The :mod:`datetime` module exports the following constants:
+
+@@ -105,10 +112,13 @@
+
+ Objects of the :class:`date` type are always naive.
+
+-An object *d* of type :class:`.time` or :class:`.datetime` may be naive or aware.
+-*d* is aware if ``d.tzinfo`` is not ``None`` and ``d.tzinfo.utcoffset(d)`` does
+-not return ``None``. If ``d.tzinfo`` is ``None``, or if ``d.tzinfo`` is not
+-``None`` but ``d.tzinfo.utcoffset(d)`` returns ``None``, *d* is naive.
++An object of type :class:`.time` or :class:`.datetime` may be naive or aware.
++A :class:`.datetime` object *d* is aware if ``d.tzinfo`` is not ``None`` and
++``d.tzinfo.utcoffset(d)`` does not return ``None``. If ``d.tzinfo`` is
++``None``, or if ``d.tzinfo`` is not ``None`` but ``d.tzinfo.utcoffset(d)``
++returns ``None``, *d* is naive. A :class:`.time` object *t* is aware
++if ``t.tzinfo`` is not ``None`` and ``t.tzinfo.utcoffset(None)`` does not return
++``None``. Otherwise, *t* is naive.
+
+ The distinction between naive and aware doesn't apply to :class:`timedelta`
+ objects.
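
A short sketch of the rule above for telling naive from aware objects (the fixed-offset class is purely illustrative, since the module supplies no concrete :class:`tzinfo` classes)::

    from datetime import datetime, timedelta, tzinfo

    class FixedUTC(tzinfo):            # illustrative placeholder only
        def utcoffset(self, dt):
            return timedelta(0)
        def dst(self, dt):
            return timedelta(0)
        def tzname(self, dt):
            return "UTC"

    def is_aware(d):
        return d.tzinfo is not None and d.tzinfo.utcoffset(d) is not None

    print is_aware(datetime(2013, 4, 17, 12, 0))                     # False
    print is_aware(datetime(2013, 4, 17, 12, 0, tzinfo=FixedUTC()))  # True
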
+@@ -1083,14 +1093,14 @@
+
+ >>> from datetime import timedelta, datetime, tzinfo
+ >>> class GMT1(tzinfo):
+- ... def __init__(self): # DST starts last Sunday in March
++ ... def utcoffset(self, dt):
++ ... return timedelta(hours=1) + self.dst(dt)
++ ... def dst(self, dt):
++ ... # DST starts last Sunday in March
+ ... d = datetime(dt.year, 4, 1) # ends last Sunday in October
+ ... self.dston = d - timedelta(days=d.weekday() + 1)
+ ... d = datetime(dt.year, 11, 1)
+ ... self.dstoff = d - timedelta(days=d.weekday() + 1)
+- ... def utcoffset(self, dt):
+- ... return timedelta(hours=1) + self.dst(dt)
+- ... def dst(self, dt):
+ ... if self.dston <= dt.replace(tzinfo=None) < self.dstoff:
+ ... return timedelta(hours=1)
+ ... else:
+@@ -1099,16 +1109,15 @@
+ ... return "GMT +1"
+ ...
+ >>> class GMT2(tzinfo):
+- ... def __init__(self):
++ ... def utcoffset(self, dt):
++ ... return timedelta(hours=2) + self.dst(dt)
++ ... def dst(self, dt):
+ ... d = datetime(dt.year, 4, 1)
+ ... self.dston = d - timedelta(days=d.weekday() + 1)
+ ... d = datetime(dt.year, 11, 1)
+ ... self.dstoff = d - timedelta(days=d.weekday() + 1)
+- ... def utcoffset(self, dt):
+- ... return timedelta(hours=1) + self.dst(dt)
+- ... def dst(self, dt):
+ ... if self.dston <= dt.replace(tzinfo=None) < self.dstoff:
+- ... return timedelta(hours=2)
++ ... return timedelta(hours=1)
+ ... else:
+ ... return timedelta(0)
+ ... def tzname(self,dt):
+@@ -1521,6 +1530,21 @@
+ other fixed-offset :class:`tzinfo` subclass (such as a class representing only
+ EST (fixed offset -5 hours), or only EDT (fixed offset -4 hours)).
+
++.. seealso::
++
++ `pytz <http://pypi.python.org/pypi/pytz/>`_
++ The standard library has no :class:`tzinfo` instances except for UTC, but
++ there exists a third-party library which brings the *IANA timezone
++ database* (also known as the Olson database) to Python: *pytz*.
++
++ *pytz* contains up-to-date information and its usage is recommended.
++
++ `IANA timezone database <http://www.iana.org/time-zones>`_
++ The Time Zone Database (often called tz or zoneinfo) contains code and
++ data that represent the history of local time for many representative
++ locations around the globe. It is updated periodically to reflect changes
++ made by political bodies to time zone boundaries, UTC offsets, and
++ daylight-saving rules.
+
+ .. _strftime-strptime-behavior:
+
+@@ -1692,3 +1716,8 @@
+ (5)
+ For example, if :meth:`utcoffset` returns ``timedelta(hours=-3, minutes=-30)``,
+ ``%z`` is replaced with the string ``'-0330'``.
++
++
++.. rubric:: Footnotes
++
++.. [#] If, that is, we ignore the effects of Relativity
+diff -r 70274d53c1dd Doc/library/dbhash.rst
+--- a/Doc/library/dbhash.rst
++++ b/Doc/library/dbhash.rst
+@@ -6,7 +6,7 @@
+ .. sectionauthor:: Fred L. Drake, Jr. <fdrake@acm.org>
+
+ .. deprecated:: 2.6
+- The :mod:`dbhash` module has been deprecated for removal in Python 3.0.
++ The :mod:`dbhash` module has been removed in Python 3.
+
+ .. index:: module: bsddb
+
+diff -r 70274d53c1dd Doc/library/dbm.rst
+--- a/Doc/library/dbm.rst
++++ b/Doc/library/dbm.rst
+@@ -6,9 +6,9 @@
+ :synopsis: The standard "database" interface, based on ndbm.
+
+ .. note::
+- The :mod:`dbm` module has been renamed to :mod:`dbm.ndbm` in Python 3.0. The
++ The :mod:`dbm` module has been renamed to :mod:`dbm.ndbm` in Python 3. The
+ :term:`2to3` tool will automatically adapt imports when converting your
+- sources to 3.0.
++ sources to Python 3.
+
+
+ The :mod:`dbm` module provides an interface to the Unix "(n)dbm" library. Dbm
+diff -r 70274d53c1dd Doc/library/decimal.rst
+--- a/Doc/library/decimal.rst
++++ b/Doc/library/decimal.rst
+@@ -944,6 +944,10 @@
+ s = calculate_something()
+ s = +s # Round the final result back to the default precision
+
++ with localcontext(BasicContext): # temporarily use the BasicContext
++ print Decimal(1) / Decimal(7)
++ print Decimal(355) / Decimal(113)
++
+ New contexts can also be created using the :class:`Context` constructor
+ described below. In addition, the module provides three pre-made contexts:
+
+diff -r 70274d53c1dd Doc/library/difflib.rst
+--- a/Doc/library/difflib.rst
++++ b/Doc/library/difflib.rst
+@@ -84,7 +84,7 @@
+ The constructor for this class is:
+
+
+- .. function:: __init__([tabsize][, wrapcolumn][, linejunk][, charjunk])
++ .. function:: __init__(tabsize=8, wrapcolumn=None, linejunk=None, charjunk=IS_CHARACTER_JUNK)
+
+ Initializes instance of :class:`HtmlDiff`.
+
+@@ -344,7 +344,7 @@
+ The :class:`SequenceMatcher` class has this constructor:
+
+
+-.. class:: SequenceMatcher([isjunk[, a[, b[, autojunk=True]]]])
++.. class:: SequenceMatcher(isjunk=None, a='', b='', autojunk=True)
+
+ Optional argument *isjunk* must be ``None`` (the default) or a one-argument
+ function that takes a sequence element and returns true if and only if the
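
As a quick sketch of the keyword defaults shown above (the two strings are arbitrary)::

    import difflib

    sm = difflib.SequenceMatcher(None, "abcdef", "bcdefg")
    print round(sm.ratio(), 3)         # similarity ratio in [0, 1]
    print sm.get_matching_blocks()[0]  # first matching block: Match(a=1, b=0, size=5)
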
+diff -r 70274d53c1dd Doc/library/dircache.rst
+--- a/Doc/library/dircache.rst
++++ b/Doc/library/dircache.rst
+@@ -7,7 +7,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`dircache` module has been removed in Python 3.0.
++ The :mod:`dircache` module has been removed in Python 3.
+
+
+ .. sectionauthor:: Moshe Zadka <moshez@zadka.site.co.il>
+diff -r 70274d53c1dd Doc/library/dl.rst
+--- a/Doc/library/dl.rst
++++ b/Doc/library/dl.rst
+@@ -8,7 +8,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`dl` module has been removed in Python 3.0. Use the :mod:`ctypes`
++ The :mod:`dl` module has been removed in Python 3. Use the :mod:`ctypes`
+ module instead.
+
+ .. sectionauthor:: Moshe Zadka <moshez@zadka.site.co.il>
+diff -r 70274d53c1dd Doc/library/docxmlrpcserver.rst
+--- a/Doc/library/docxmlrpcserver.rst
++++ b/Doc/library/docxmlrpcserver.rst
+@@ -8,8 +8,8 @@
+
+ .. note::
+ The :mod:`DocXMLRPCServer` module has been merged into :mod:`xmlrpc.server`
+- in Python 3.0. The :term:`2to3` tool will automatically adapt imports when
+- converting your sources to 3.0.
++ in Python 3. The :term:`2to3` tool will automatically adapt imports when
++ converting your sources to Python 3.
+
+
+ .. versionadded:: 2.3
+diff -r 70274d53c1dd Doc/library/dumbdbm.rst
+--- a/Doc/library/dumbdbm.rst
++++ b/Doc/library/dumbdbm.rst
+@@ -5,9 +5,9 @@
+ :synopsis: Portable implementation of the simple DBM interface.
+
+ .. note::
+- The :mod:`dumbdbm` module has been renamed to :mod:`dbm.dumb` in Python 3.0.
++ The :mod:`dumbdbm` module has been renamed to :mod:`dbm.dumb` in Python 3.
+ The :term:`2to3` tool will automatically adapt imports when converting your
+- sources to 3.0.
++ sources to Python 3.
+
+ .. index:: single: databases
+
+diff -r 70274d53c1dd Doc/library/dummy_thread.rst
+--- a/Doc/library/dummy_thread.rst
++++ b/Doc/library/dummy_thread.rst
+@@ -6,8 +6,8 @@
+
+ .. note::
+ The :mod:`dummy_thread` module has been renamed to :mod:`_dummy_thread` in
+- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
+- converting your sources to 3.0; however, you should consider using the
++ Python 3. The :term:`2to3` tool will automatically adapt imports when
++ converting your sources to Python 3; however, you should consider using the
+ high-lever :mod:`dummy_threading` module instead.
+
+ **Source code:** :source:`Lib/dummy_thread.py`
+diff -r 70274d53c1dd Doc/library/email.charset.rst
+--- a/Doc/library/email.charset.rst
++++ b/Doc/library/email.charset.rst
+@@ -1,5 +1,5 @@
+-:mod:`email`: Representing character sets
+------------------------------------------
++:mod:`email.charset`: Representing character sets
++-------------------------------------------------
+
+ .. module:: email.charset
+ :synopsis: Character Sets
+diff -r 70274d53c1dd Doc/library/email.encoders.rst
+--- a/Doc/library/email.encoders.rst
++++ b/Doc/library/email.encoders.rst
+@@ -1,5 +1,5 @@
+-:mod:`email`: Encoders
+-----------------------
++:mod:`email.encoders`: Encoders
++-------------------------------
+
+ .. module:: email.encoders
+ :synopsis: Encoders for email message payloads.
+@@ -18,6 +18,10 @@
+ payload, encode it, and reset the payload to this newly encoded value. They
+ should also set the :mailheader:`Content-Transfer-Encoding` header as appropriate.
+
++Note that these functions are not meaningful for a multipart message. They
++must be applied to individual subparts instead, and will raise a
++:exc:`TypeError` if passed a message whose type is multipart.
++
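
A tiny sketch of the point above: apply the encoder to a single subpart, never to the multipart container itself (the payload is a placeholder)::

    from email.mime.base import MIMEBase
    from email import encoders

    part = MIMEBase("application", "octet-stream")
    part.set_payload("some raw bytes")
    encoders.encode_base64(part)             # fine on a non-multipart part
    print part["Content-Transfer-Encoding"]  # base64
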
+ Here are the encoding functions provided:
+
+
+diff -r 70274d53c1dd Doc/library/email.errors.rst
+--- a/Doc/library/email.errors.rst
++++ b/Doc/library/email.errors.rst
+@@ -1,5 +1,5 @@
+-:mod:`email`: Exception and Defect classes
+-------------------------------------------
++:mod:`email.errors`: Exception and Defect classes
++-------------------------------------------------
+
+ .. module:: email.errors
+ :synopsis: The exception classes used by the email package.
+diff -r 70274d53c1dd Doc/library/email.generator.rst
+--- a/Doc/library/email.generator.rst
++++ b/Doc/library/email.generator.rst
+@@ -1,5 +1,5 @@
+-:mod:`email`: Generating MIME documents
+----------------------------------------
++:mod:`email.generator`: Generating MIME documents
++-------------------------------------------------
+
+ .. module:: email.generator
+ :synopsis: Generate flat text email messages from a message structure.
+@@ -17,10 +17,10 @@
+ standards-compliant way, should handle MIME and non-MIME email messages just
+ fine, and is designed so that the transformation from flat text, to a message
+ structure via the :class:`~email.parser.Parser` class, and back to flat text,
+-is idempotent (the input is identical to the output). On the other hand, using
+-the Generator on a :class:`~email.message.Message` constructed by program may
+-result in changes to the :class:`~email.message.Message` object as defaults are
+-filled in.
++is idempotent (the input is identical to the output) [#]_. On the other hand,
++using the Generator on a :class:`~email.message.Message` constructed by program
++may result in changes to the :class:`~email.message.Message` object as defaults
++are filled in.
+
+ Here are the public methods of the :class:`Generator` class, imported from the
+ :mod:`email.generator` module:
+@@ -125,3 +125,11 @@
+ .. versionchanged:: 2.5
+ The previously deprecated method :meth:`__call__` was removed.
+
++
++.. rubric:: Footnotes
++
++.. [#] This statement assumes that you use the appropriate setting for the
++ ``unixfrom`` argument, and that you set maxheaderlen=0 (which will
++ preserve whatever the input line lengths were). It is also not strictly
++ true, since in many cases runs of whitespace in headers are collapsed
++ into single blanks. The latter is a bug that will eventually be fixed.
+diff -r 70274d53c1dd Doc/library/email.header.rst
+--- a/Doc/library/email.header.rst
++++ b/Doc/library/email.header.rst
+@@ -1,5 +1,5 @@
+-:mod:`email`: Internationalized headers
+----------------------------------------
++:mod:`email.header`: Internationalized headers
++----------------------------------------------
+
+ .. module:: email.header
+ :synopsis: Representing non-ASCII headers
+diff -r 70274d53c1dd Doc/library/email.iterators.rst
+--- a/Doc/library/email.iterators.rst
++++ b/Doc/library/email.iterators.rst
+@@ -1,5 +1,5 @@
+-:mod:`email`: Iterators
+------------------------
++:mod:`email.iterators`: Iterators
++---------------------------------
+
+ .. module:: email.iterators
+ :synopsis: Iterate over a message object tree.
+diff -r 70274d53c1dd Doc/library/email.message.rst
+--- a/Doc/library/email.message.rst
++++ b/Doc/library/email.message.rst
+@@ -1,5 +1,5 @@
+-:mod:`email`: Representing an email message
+--------------------------------------------
++:mod:`email.message`: Representing an email message
++---------------------------------------------------
+
+ .. module:: email.message
+ :synopsis: The base class representing email messages.
+diff -r 70274d53c1dd Doc/library/email.mime.rst
+--- a/Doc/library/email.mime.rst
++++ b/Doc/library/email.mime.rst
+@@ -1,5 +1,5 @@
+-:mod:`email`: Creating email and MIME objects from scratch
+-----------------------------------------------------------
++:mod:`email.mime`: Creating email and MIME objects from scratch
++---------------------------------------------------------------
+
+ .. module:: email.mime
+ :synopsis: Build MIME messages.
+diff -r 70274d53c1dd Doc/library/email.parser.rst
+--- a/Doc/library/email.parser.rst
++++ b/Doc/library/email.parser.rst
+@@ -1,5 +1,5 @@
+-:mod:`email`: Parsing email messages
+-------------------------------------
++:mod:`email.parser`: Parsing email messages
++-------------------------------------------
+
+ .. module:: email.parser
+ :synopsis: Parse flat text email messages to produce a message object structure.
+diff -r 70274d53c1dd Doc/library/email.util.rst
+--- a/Doc/library/email.util.rst
++++ b/Doc/library/email.util.rst
+@@ -1,5 +1,5 @@
+-:mod:`email`: Miscellaneous utilities
+--------------------------------------
++:mod:`email.utils`: Miscellaneous utilities
++-------------------------------------------
+
+ .. module:: email.utils
+ :synopsis: Miscellaneous email package utilities.
+diff -r 70274d53c1dd Doc/library/filecmp.rst
+--- a/Doc/library/filecmp.rst
++++ b/Doc/library/filecmp.rst
+@@ -75,6 +75,9 @@
+ 'tags']``. *hide* is a list of names to hide, and defaults to ``[os.curdir,
+ os.pardir]``.
+
++ The :class:`dircmp` class compares files by doing *shallow* comparisons
++ as described for :func:`filecmp.cmp`.
++
+ The :class:`dircmp` class provides the following methods:
+
+
+@@ -94,7 +97,7 @@
+ Print a comparison between *a* and *b* and common subdirectories
+ (recursively).
+
+- The :class:`dircmp` offers a number of interesting attributes that may be
++ The :class:`dircmp` class offers a number of interesting attributes that may be
+ used to get various bits of information about the directory trees being
+ compared.
+
+@@ -103,6 +106,16 @@
+ to compute are used.
+
+
++ .. attribute:: left
++
++ The directory *a*.
++
++
++ .. attribute:: right
++
++ The directory *b*.
++
++
+ .. attribute:: left_list
+
+ Files and subdirectories in *a*, filtered by *hide* and *ignore*.
+@@ -146,12 +159,14 @@
+
+ .. attribute:: same_files
+
+- Files which are identical in both *a* and *b*.
++ Files which are identical in both *a* and *b*, using the class's
++ file comparison operator.
+
+
+ .. attribute:: diff_files
+
+- Files which are in both *a* and *b*, whose contents differ.
++ Files which are in both *a* and *b*, whose contents differ according
++ to the class's file comparison operator.
+
+
+ .. attribute:: funny_files
+@@ -163,3 +178,18 @@
+
+ A dictionary mapping names in :attr:`common_dirs` to :class:`dircmp` objects.
+
++
++Here is a simplified example of using the ``subdirs`` attribute to search
++recursively through two directories to show common different files::
++
++ >>> from filecmp import dircmp
++ >>> def print_diff_files(dcmp):
++ ... for name in dcmp.diff_files:
++ ... print "diff_file %s found in %s and %s" % (name, dcmp.left,
++ ... dcmp.right)
++ ... for sub_dcmp in dcmp.subdirs.values():
++ ... print_diff_files(sub_dcmp)
++ ...
++ >>> dcmp = dircmp('dir1', 'dir2')
++ >>> print_diff_files(dcmp)
++
+diff -r 70274d53c1dd Doc/library/fl.rst
+--- a/Doc/library/fl.rst
++++ b/Doc/library/fl.rst
+@@ -9,7 +9,7 @@
+
+
+ .. deprecated:: 2.6
+- The :mod:`fl` module has been deprecated for removal in Python 3.0.
++ The :mod:`fl` module has been removed in Python 3.
+
+
+ .. index::
+@@ -487,7 +487,7 @@
+
+
+ .. deprecated:: 2.6
+- The :mod:`FL` module has been deprecated for removal in Python 3.0.
++ The :mod:`FL` module has been removed in Python 3.
+
+
+ This module defines symbolic constants needed to use the built-in module
+@@ -509,7 +509,7 @@
+
+
+ .. deprecated:: 2.6
+- The :mod:`flp` module has been deprecated for removal in Python 3.0.
++ The :mod:`flp` module has been removed in Python 3.
+
+
+ This module defines functions that can read form definitions created by the
+diff -r 70274d53c1dd Doc/library/fm.rst
+--- a/Doc/library/fm.rst
++++ b/Doc/library/fm.rst
+@@ -8,7 +8,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`fm` module has been deprecated for removal in Python 3.0.
++ The :mod:`fm` module has been removed in Python 3.
+
+
+
+diff -r 70274d53c1dd Doc/library/formatter.rst
+--- a/Doc/library/formatter.rst
++++ b/Doc/library/formatter.rst
+@@ -341,10 +341,10 @@
+ output.
+
+
+-.. class:: DumbWriter([file[, maxcol=72]])
++.. class:: DumbWriter(file=None, maxcol=72)
+
+ Simple writer class which writes output on the file object passed in as *file*
+- or, if *file* is omitted, on standard output. The output is simply word-wrapped
++ or, if *file* is None, on standard output. The output is simply word-wrapped
+ to the number of columns specified by *maxcol*. This class is suitable for
+ reflowing a sequence of paragraphs.
+
+diff -r 70274d53c1dd Doc/library/fpectl.rst
+--- a/Doc/library/fpectl.rst
++++ b/Doc/library/fpectl.rst
+@@ -113,8 +113,8 @@
+ .. seealso::
+
+ Some files in the source distribution may be interesting in learning more about
+- how this module operates. The include file :file:`Include/pyfpe.h` discusses the
+- implementation of this module at some length. :file:`Modules/fpetestmodule.c`
++ how this module operates. The include file :source:`Include/pyfpe.h` discusses the
++ implementation of this module at some length. :source:`Modules/fpetestmodule.c`
+ gives several examples of use. Many additional examples can be found in
+- :file:`Objects/floatobject.c`.
++ :source:`Objects/floatobject.c`.
+
+diff -r 70274d53c1dd Doc/library/fpformat.rst
+--- a/Doc/library/fpformat.rst
++++ b/Doc/library/fpformat.rst
+@@ -7,7 +7,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`fpformat` module has been removed in Python 3.0.
++ The :mod:`fpformat` module has been removed in Python 3.
+
+ .. sectionauthor:: Moshe Zadka <moshez@zadka.site.co.il>
+
+diff -r 70274d53c1dd Doc/library/functions.rst
+--- a/Doc/library/functions.rst
++++ b/Doc/library/functions.rst
+@@ -18,16 +18,25 @@
+ :func:`bool` :func:`filter` :func:`len` :func:`range` :func:`type`
+ :func:`bytearray` :func:`float` :func:`list` :func:`raw_input` :func:`unichr`
+ :func:`callable` :func:`format` :func:`locals` :func:`reduce` :func:`unicode`
+-:func:`chr` :func:`frozenset` :func:`long` :func:`reload` :func:`vars`
++:func:`chr` |func-frozenset|_ :func:`long` :func:`reload` :func:`vars`
+ :func:`classmethod` :func:`getattr` :func:`map` :func:`repr` :func:`xrange`
+ :func:`cmp` :func:`globals` :func:`max` :func:`reversed` :func:`zip`
+-:func:`compile` :func:`hasattr` :func:`memoryview` :func:`round` :func:`__import__`
+-:func:`complex` :func:`hash` :func:`min` :func:`set` :func:`apply`
++:func:`compile` :func:`hasattr` |func-memoryview|_ :func:`round` :func:`__import__`
++:func:`complex` :func:`hash` :func:`min` |func-set|_ :func:`apply`
+ :func:`delattr` :func:`help` :func:`next` :func:`setattr` :func:`buffer`
+-:func:`dict` :func:`hex` :func:`object` :func:`slice` :func:`coerce`
++|func-dict|_ :func:`hex` :func:`object` :func:`slice` :func:`coerce`
+ :func:`dir` :func:`id` :func:`oct` :func:`sorted` :func:`intern`
+ =================== ================= ================== ================= ====================
+
++.. using :func:`dict` would create a link to another page, so local targets are
++ used, with replacement texts to make the output in the table consistent
++
++.. |func-dict| replace:: ``dict()``
++.. |func-frozenset| replace:: ``frozenset()``
++.. |func-memoryview| replace:: ``memoryview()``
++.. |func-set| replace:: ``set()``
++
++
+ .. function:: abs(x)
+
+ Return the absolute value of a number. The argument may be a plain or long
+@@ -247,6 +256,13 @@
+ the function serves as a numeric conversion function like :func:`int`,
+ :func:`long` and :func:`float`. If both arguments are omitted, returns ``0j``.
+
++ .. note::
++
++ When converting from a string, the string must not contain whitespace
++ around the central ``+`` or ``-`` operator. For example,
++ ``complex('1+2j')`` is fine, but ``complex('1 + 2j')`` raises
++ :exc:`ValueError`.
++
+ The complex type is described in :ref:`typesnumeric`.
+
+
+@@ -258,6 +274,7 @@
+ example, ``delattr(x, 'foobar')`` is equivalent to ``del x.foobar``.
+
+
++.. _func-dict:
+ .. function:: dict([arg])
+ :noindex:
+
+@@ -337,7 +354,7 @@
+ Using :func:`divmod` with complex numbers is deprecated.
+
+
+-.. function:: enumerate(sequence[, start=0])
++.. function:: enumerate(sequence, start=0)
+
+ Return an enumerate object. *sequence* must be a sequence, an
+ :term:`iterator`, or some other object which supports iteration. The
+@@ -413,7 +430,10 @@
+ The arguments are a file name and two optional dictionaries. The file is parsed
+ and evaluated as a sequence of Python statements (similarly to a module) using
+ the *globals* and *locals* dictionaries as global and local namespace. If
+- provided, *locals* can be any mapping object.
++ provided, *locals* can be any mapping object. Remember that at module level,
++ globals and locals are the same dictionary. If two separate objects are
++ passed as *globals* and *locals*, the code will be executed as if it were
++ embedded in a class definition.
+
+ .. versionchanged:: 2.4
+ formerly *locals* was required to be a dictionary.
+@@ -431,7 +451,7 @@
+ used reliably to modify a function's locals.
+
+
+-.. function:: file(filename[, mode[, bufsize]])
++.. function:: file(name[, mode[, buffering]])
+
+ Constructor function for the :class:`file` type, described further in section
+ :ref:`bltin-file-objects`. The constructor's arguments are the same as those
+@@ -506,6 +526,7 @@
+ .. versionadded:: 2.6
+
+
++.. _func-frozenset:
+ .. function:: frozenset([iterable])
+ :noindex:
+
+@@ -745,7 +766,7 @@
+ .. versionchanged:: 2.5
+ Added support for the optional *key* argument.
+
+-
++.. _func-memoryview:
+ .. function:: memoryview(obj)
+ :noindex:
+
+@@ -894,7 +915,7 @@
+ accidents.)
+
+
+-.. function:: print([object, ...][, sep=' '][, end='\\n'][, file=sys.stdout])
++.. function:: print([object, ...], sep=' ', end='\\n', file=sys.stdout)
+
+ Print *object*\(s) to the stream *file*, separated by *sep* and followed by
+ *end*. *sep*, *end* and *file*, if present, must be given as keyword
+@@ -1174,6 +1195,8 @@
+ can't be represented exactly as a float. See :ref:`tut-fp-issues` for
+ more information.
+
++
++.. _func-set:
+ .. function:: set([iterable])
+ :noindex:
+
+@@ -1535,7 +1558,7 @@
+ .. note::
+
+ This is an advanced function that is not needed in everyday Python
+- programming.
++ programming, unlike :func:`importlib.import_module`.
+
+ This function is invoked by the :keyword:`import` statement. It can be
+ replaced (by importing the :mod:`__builtin__` module and assigning to
+@@ -1586,15 +1609,8 @@
+ names.
+
+ If you simply want to import a module (potentially within a package) by name,
+- you can call :func:`__import__` and then look it up in :data:`sys.modules`::
++ use :func:`importlib.import_module`.
+
+- >>> import sys
+- >>> name = 'foo.bar.baz'
+- >>> __import__(name)
+- <module 'foo' from ...>
+- >>> baz = sys.modules[name]
+- >>> baz
+- <module 'foo.bar.baz' from ...>
+
+ .. versionchanged:: 2.5
+ The level parameter was added.
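
A one-line sketch of the recommended alternative (the dotted module name is only an example)::

    import importlib

    tool = importlib.import_module("json.tool")  # binds the submodule directly
    print tool.__name__                          # json.tool
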
+diff -r 70274d53c1dd Doc/library/gdbm.rst
+--- a/Doc/library/gdbm.rst
++++ b/Doc/library/gdbm.rst
+@@ -6,9 +6,9 @@
+ :synopsis: GNU's reinterpretation of dbm.
+
+ .. note::
+- The :mod:`gdbm` module has been renamed to :mod:`dbm.gnu` in Python 3.0. The
++ The :mod:`gdbm` module has been renamed to :mod:`dbm.gnu` in Python 3. The
+ :term:`2to3` tool will automatically adapt imports when converting your
+- sources to 3.0.
++ sources to Python 3.
+
+
+ .. index:: module: dbm
+diff -r 70274d53c1dd Doc/library/gl.rst
+--- a/Doc/library/gl.rst
++++ b/Doc/library/gl.rst
+@@ -8,7 +8,7 @@
+
+
+ .. deprecated:: 2.6
+- The :mod:`gl` module has been deprecated for removal in Python 3.0.
++ The :mod:`gl` module has been removed in Python 3.
+
+
+ This module provides access to the Silicon Graphics *Graphics Library*. It is
+@@ -168,7 +168,7 @@
+
+
+ .. deprecated:: 2.6
+- The :mod:`DEVICE` module has been deprecated for removal in Python 3.0.
++ The :mod:`DEVICE` module has been removed in Python 3.
+
+
+ This modules defines the constants used by the Silicon Graphics *Graphics
+@@ -186,7 +186,7 @@
+
+
+ .. deprecated:: 2.6
+- The :mod:`GL` module has been deprecated for removal in Python 3.0.
++ The :mod:`GL` module has been removed in Python 3.
+
+ This module contains constants used by the Silicon Graphics *Graphics Library*
+ from the C header file ``<gl/gl.h>``. Read the module source file for details.
+diff -r 70274d53c1dd Doc/library/gzip.rst
+--- a/Doc/library/gzip.rst
++++ b/Doc/library/gzip.rst
+@@ -22,9 +22,6 @@
+ :program:`gzip` and :program:`gunzip` programs, such as those produced by
+ :program:`compress` and :program:`pack`, are not supported by this module.
+
+-For other archive formats, see the :mod:`bz2`, :mod:`zipfile`, and
+-:mod:`tarfile` modules.
+-
+ The module defines the following items:
+
+
+diff -r 70274d53c1dd Doc/library/htmllib.rst
+--- a/Doc/library/htmllib.rst
++++ b/Doc/library/htmllib.rst
+@@ -6,7 +6,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`htmllib` module has been removed in Python 3.0.
++ The :mod:`htmllib` module has been removed in Python 3.
+
+
+ .. index::
+@@ -162,8 +162,8 @@
+ .. note::
+
+ The :mod:`htmlentitydefs` module has been renamed to :mod:`html.entities` in
+- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
+- converting your sources to 3.0.
++ Python 3. The :term:`2to3` tool will automatically adapt imports when
++ converting your sources to Python 3.
+
+ **Source code:** :source:`Lib/htmlentitydefs.py`
+
+diff -r 70274d53c1dd Doc/library/httplib.rst
+--- a/Doc/library/httplib.rst
++++ b/Doc/library/httplib.rst
+@@ -6,8 +6,8 @@
+
+ .. note::
+ The :mod:`httplib` module has been renamed to :mod:`http.client` in Python
+- 3.0. The :term:`2to3` tool will automatically adapt imports when converting
+- your sources to 3.0.
++ 3. The :term:`2to3` tool will automatically adapt imports when converting
++ your sources to Python 3.
+
+
+ .. index::
+@@ -89,7 +89,7 @@
+ *source_address* was added.
+
+
+-.. class:: HTTPResponse(sock[, debuglevel=0][, strict=0])
++.. class:: HTTPResponse(sock, debuglevel=0, strict=0)
+
+ Class whose instances are returned upon successful connection. Not instantiated
+ directly by user.
+diff -r 70274d53c1dd Doc/library/imageop.rst
+--- a/Doc/library/imageop.rst
++++ b/Doc/library/imageop.rst
+@@ -7,7 +7,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`imageop` module has been removed in Python 3.0.
++ The :mod:`imageop` module has been removed in Python 3.
+
+ The :mod:`imageop` module contains some useful operations on images. It operates
+ on images consisting of 8 or 32 bit pixels stored in Python strings. This is
+diff -r 70274d53c1dd Doc/library/imgfile.rst
+--- a/Doc/library/imgfile.rst
++++ b/Doc/library/imgfile.rst
+@@ -8,7 +8,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`imgfile` module has been deprecated for removal in Python 3.0.
++ The :mod:`imgfile` module has been removed in Python 3.
+
+
+
+diff -r 70274d53c1dd Doc/library/imp.rst
+--- a/Doc/library/imp.rst
++++ b/Doc/library/imp.rst
+@@ -65,7 +65,7 @@
+ path and the last item in the *description* tuple is :const:`PKG_DIRECTORY`.
+
+ This function does not handle hierarchical module names (names containing
+- dots). In order to find *P*.*M*, that is, submodule *M* of package *P*, use
++ dots). In order to find *P.M*, that is, submodule *M* of package *P*, use
+ :func:`find_module` and :func:`load_module` to find and load package *P*, and
+ then use :func:`find_module` with the *path* argument set to ``P.__path__``.
+ When *P* itself has a dotted name, apply this recipe recursively.
+diff -r 70274d53c1dd Doc/library/imputil.rst
+--- a/Doc/library/imputil.rst
++++ b/Doc/library/imputil.rst
+@@ -7,7 +7,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`imputil` module has been removed in Python 3.0.
++ The :mod:`imputil` module has been removed in Python 3.
+
+
+ .. index:: statement: import
+diff -r 70274d53c1dd Doc/library/io.rst
+--- a/Doc/library/io.rst
++++ b/Doc/library/io.rst
+@@ -696,11 +696,13 @@
+ Read and return at most *n* characters from the stream as a single
+ :class:`unicode`. If *n* is negative or ``None``, reads until EOF.
+
+- .. method:: readline()
++ .. method:: readline(limit=-1)
+
+ Read until newline or EOF and return a single ``unicode``. If the
+ stream is already at EOF, an empty string is returned.
+
++ If *limit* is specified, at most *limit* characters will be read.
++
+ .. method:: seek(offset, whence=SEEK_SET)
+
+ Change the stream position to the given *offset*. Behaviour depends
+@@ -752,14 +754,22 @@
+ sequences) can be used. Any other error handling name that has been
+ registered with :func:`codecs.register_error` is also valid.
+
+- *newline* can be ``None``, ``''``, ``'\n'``, ``'\r'``, or ``'\r\n'``. It
+- controls the handling of line endings. If it is ``None``, universal newlines
+- is enabled. With this enabled, on input, the lines endings ``'\n'``,
+- ``'\r'``, or ``'\r\n'`` are translated to ``'\n'`` before being returned to
+- the caller. Conversely, on output, ``'\n'`` is translated to the system
+- default line separator, :data:`os.linesep`. If *newline* is any other of its
+- legal values, that newline becomes the newline when the file is read and it
+- is returned untranslated. On output, ``'\n'`` is converted to the *newline*.
++ *newline* controls how line endings are handled. It can be ``None``,
++ ``''``, ``'\n'``, ``'\r'``, and ``'\r\n'``. It works as follows:
++
++ * On input, if *newline* is ``None``, universal newlines mode is enabled.
++ Lines in the input can end in ``'\n'``, ``'\r'``, or ``'\r\n'``, and these
++ are translated into ``'\n'`` before being returned to the caller. If it is
++ ``''``, universal newline mode is enabled, but line endings are returned to
++ the caller untranslated. If it has any of the other legal values, input
++ lines are only terminated by the given string, and the line ending is
++ returned to the caller untranslated.
++
++ * On output, if *newline* is ``None``, any ``'\n'`` characters written are
++ translated to the system default line separator, :data:`os.linesep`. If
++ *newline* is ``''``, no translation takes place. If *newline* is any of
++ the other legal values, any ``'\n'`` characters written are translated to
++ the given string.
+
+ If *line_buffering* is ``True``, :meth:`flush` is implied when a call to
+ write contains a newline character.
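
A small sketch of the *newline* behaviour described above (the file name is a placeholder)::

    import io

    with io.open("sample.txt", "w", newline="") as f:
        f.write(u"one\r\ntwo\n")       # written untranslated

    with io.open("sample.txt", "r", newline=None) as f:
        print f.read().splitlines()    # universal newlines: [u'one', u'two']
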
+diff -r 70274d53c1dd Doc/library/itertools.rst
+--- a/Doc/library/itertools.rst
++++ b/Doc/library/itertools.rst
+@@ -733,7 +733,8 @@
+ return izip(a, b)
+
+ def grouper(n, iterable, fillvalue=None):
+- "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
++ "Collect data into fixed-length chunks or blocks"
++ # grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
+ args = [iter(iterable)] * n
+ return izip_longest(fillvalue=fillvalue, *args)
+
+diff -r 70274d53c1dd Doc/library/jpeg.rst
+--- a/Doc/library/jpeg.rst
++++ b/Doc/library/jpeg.rst
+@@ -8,7 +8,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`jpeg` module has been deprecated for removal in Python 3.0.
++ The :mod:`jpeg` module has been removed in Python 3.
+
+
+
+diff -r 70274d53c1dd Doc/library/json.rst
+--- a/Doc/library/json.rst
++++ b/Doc/library/json.rst
+@@ -99,8 +99,8 @@
+ {
+ "json": "obj"
+ }
+- $ echo '{ 1.2:3.4}' | python -mjson.tool
+- Expecting property name: line 1 column 2 (char 2)
++ $ echo '{1.2:3.4}' | python -mjson.tool
++ Expecting property name enclosed in double quotes: line 1 column 1 (char 1)
+
+ .. highlight:: python
+
+@@ -170,6 +170,14 @@
+ :class:`unicode` instance. The other arguments have the same meaning as in
+ :func:`dump`.
+
++ .. note::
++
++ Keys in key/value pairs of JSON are always of the type :class:`str`. When
++ a dictionary is converted into JSON, all the keys of the dictionary are
++ coerced to strings. As a result of this, if a dictionary is converted
++ into JSON and then back into a dictionary, the dictionary may not equal
++ the original one. That is, ``loads(dumps(x)) != x`` if x has non-string
++ keys.
+
+ .. function:: load(fp[, encoding[, cls[, object_hook[, parse_float[, parse_int[, parse_constant[, object_pairs_hook[, **kw]]]]]]]])
+
+@@ -209,10 +217,13 @@
+ (e.g. :class:`float`).
+
+ *parse_constant*, if specified, will be called with one of the following
+- strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``, ``'null'``, ``'true'``,
+- ``'false'``. This can be used to raise an exception if invalid JSON numbers
++ strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``.
++ This can be used to raise an exception if invalid JSON numbers
+ are encountered.
+
++ .. versionchanged:: 2.7
++ *parse_constant* doesn't get called on 'null', 'true', 'false' anymore.
++
+ To use a custom :class:`JSONDecoder` subclass, specify it with the ``cls``
+ kwarg; otherwise :class:`JSONDecoder` is used. Additional keyword arguments
+ will be passed to the constructor of the class.
+diff -r 70274d53c1dd Doc/library/locale.rst
+--- a/Doc/library/locale.rst
++++ b/Doc/library/locale.rst
+@@ -164,22 +164,22 @@
+
+ .. data:: D_T_FMT
+
+- Get a string that can be used as a format string for :func:`strftime` to
++ Get a string that can be used as a format string for :func:`time.strftime` to
+ represent date and time in a locale-specific way.
+
+ .. data:: D_FMT
+
+- Get a string that can be used as a format string for :func:`strftime` to
++ Get a string that can be used as a format string for :func:`time.strftime` to
+ represent a date in a locale-specific way.
+
+ .. data:: T_FMT
+
+- Get a string that can be used as a format string for :func:`strftime` to
++ Get a string that can be used as a format string for :func:`time.strftime` to
+ represent a time in a locale-specific way.
+
+ .. data:: T_FMT_AMPM
+
+- Get a format string for :func:`strftime` to represent time in the am/pm
++ Get a format string for :func:`time.strftime` to represent time in the am/pm
+ format.
+
+ .. data:: DAY_1 ... DAY_7
+@@ -243,24 +243,24 @@
+ then-emperor's reign.
+
+ Normally it should not be necessary to use this value directly. Specifying
+- the ``E`` modifier in their format strings causes the :func:`strftime`
++ the ``E`` modifier in their format strings causes the :func:`time.strftime`
+ function to use this information. The format of the returned string is not
+ specified, and therefore you should not assume knowledge of it on different
+ systems.
+
+ .. data:: ERA_D_T_FMT
+
+- Get a format string for :func:`strftime` to represent date and time in a
++ Get a format string for :func:`time.strftime` to represent date and time in a
+ locale-specific era-based way.
+
+ .. data:: ERA_D_FMT
+
+- Get a format string for :func:`strftime` to represent a date in a
++ Get a format string for :func:`time.strftime` to represent a date in a
+ locale-specific era-based way.
+
+ .. data:: ERA_T_FMT
+
+- Get a format string for :func:`strftime` to represent a time in a
++ Get a format string for :func:`time.strftime` to represent a time in a
+ locale-specific era-based way.
+
+ .. data:: ALT_DIGITS
+diff -r 70274d53c1dd Doc/library/logging.config.rst
+--- a/Doc/library/logging.config.rst
++++ b/Doc/library/logging.config.rst
+@@ -112,6 +112,19 @@
+ send it to the socket as a string of bytes preceded by a four-byte length
+ string packed in binary using ``struct.pack('>L', n)``.
+
++ .. note:: Because portions of the configuration are passed through
++ :func:`eval`, use of this function may open its users to a security risk.
++ While the function only binds to a socket on ``localhost``, and so does
++ not accept connections from remote machines, there are scenarios where
++ untrusted code could be run under the account of the process which calls
++ :func:`listen`. Specifically, if the process calling :func:`listen` runs
++ on a multi-user machine where users cannot trust each other, then a
++ malicious user could arrange to run essentially arbitrary code in a
++ victim user's process, simply by connecting to the victim's
++ :func:`listen` socket and sending a configuration which runs whatever
++ code the attacker wants to have executed in the victim's process. This is
++ especially easy to do if the default port is used, but not hard even if a
++ different port is used.
+
+ .. function:: stopListening()
+
+@@ -701,6 +714,12 @@
+ :class:`Formatter` subclass. Subclasses of :class:`Formatter` can present
+ exception tracebacks in an expanded or condensed format.
+
++.. note:: Due to the use of :func:`eval` as described above, there are
++ potential security risks which result from using :func:`listen` to send
++ and receive configurations via sockets. The risks are limited to where
++ multiple users with no mutual trust run code on the same machine; see the
++ :func:`listen` documentation for more information.
++
+ .. seealso::
+
+ Module :mod:`logging`
+diff -r 70274d53c1dd Doc/library/logging.handlers.rst
+--- a/Doc/library/logging.handlers.rst
++++ b/Doc/library/logging.handlers.rst
+@@ -650,7 +650,7 @@
+ :class:`BufferingHandler`, which is an abstract class. This buffers logging
+ records in memory. Whenever each record is added to the buffer, a check is made
+ by calling :meth:`shouldFlush` to see if the buffer should be flushed. If it
+-should, then :meth:`flush` is expected to do the needful.
++should, then :meth:`flush` is expected to do the flushing.
+
+
+ .. class:: BufferingHandler(capacity)
+diff -r 70274d53c1dd Doc/library/logging.rst
+--- a/Doc/library/logging.rst
++++ b/Doc/library/logging.rst
+@@ -51,9 +51,21 @@
+ Logger Objects
+ --------------
+
+-Loggers have the following attributes and methods. Note that Loggers are never
++Loggers have the following attributes and methods. Note that Loggers are never
+ instantiated directly, but always through the module-level function
+-``logging.getLogger(name)``.
++``logging.getLogger(name)``. Multiple calls to :func:`getLogger` with the same
++name will always return a reference to the same Logger object.
++
++The ``name`` is potentially a period-separated hierarchical value, like
++``foo.bar.baz`` (though it could also be just plain ``foo``, for example).
++Loggers that are further down in the hierarchical list are children of loggers
++higher up in the list. For example, given a logger with a name of ``foo``,
++loggers with names of ``foo.bar``, ``foo.bar.baz``, and ``foo.bam`` are all
++descendants of ``foo``. The logger name hierarchy is analogous to the Python
++package hierarchy, and identical to it if you organise your loggers on a
++per-module basis using the recommended construction
++``logging.getLogger(__name__)``. That's because in a module, ``__name__``
++is the module's name in the Python package namespace.
+
+ .. class:: Logger
+
+@@ -138,7 +150,7 @@
+
+ FORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s'
+ logging.basicConfig(format=FORMAT)
+- d = { 'clientip' : '192.168.0.1', 'user' : 'fbloggs' }
++ d = {'clientip': '192.168.0.1', 'user': 'fbloggs'}
+ logger = logging.getLogger('tcpserver')
+ logger.warning('Protocol problem: %s', 'connection reset', extra=d)
+
+@@ -617,6 +629,9 @@
+ .. versionchanged:: 2.5
+ *funcName* was added.
+
++.. versionchanged:: 2.6
++ *processName* was added.
++
+ .. _logger-adapter:
+
+ LoggerAdapter Objects
+@@ -802,7 +817,8 @@
+ effect is to disable all logging calls of severity *lvl* and below, so that
+ if you call it with a value of INFO, then all INFO and DEBUG events would be
+ discarded, whereas those of severity WARNING and above would be processed
+- according to the logger's effective level.
++ according to the logger's effective level. To undo the effect of a call to
++ ``logging.disable(lvl)``, call ``logging.disable(logging.NOTSET)``.
+
+
+ .. function:: addLevelName(lvl, levelName)
+@@ -915,12 +931,11 @@
+ If *capture* is ``True``, warnings issued by the :mod:`warnings` module will
+ be redirected to the logging system. Specifically, a warning will be
+ formatted using :func:`warnings.formatwarning` and the resulting string
+- logged to a logger named 'py.warnings' with a severity of `WARNING`.
++ logged to a logger named ``'py.warnings'`` with a severity of :const:`WARNING`.
+
+ If *capture* is ``False``, the redirection of warnings to the logging system
+ will stop, and warnings will be redirected to their original destinations
+- (i.e. those in effect before `captureWarnings(True)` was called).
+-
++ (i.e. those in effect before ``captureWarnings(True)`` was called).
+
+
+ .. seealso::
+diff -r 70274d53c1dd Doc/library/macostools.rst
+--- a/Doc/library/macostools.rst
++++ b/Doc/library/macostools.rst
+@@ -15,7 +15,7 @@
+
+ .. note::
+
+- This module has been removed in Python 3.0.
++ This module has been removed in Python 3.
+
+
+
+diff -r 70274d53c1dd Doc/library/mailbox.rst
+--- a/Doc/library/mailbox.rst
++++ b/Doc/library/mailbox.rst
+@@ -154,7 +154,7 @@
+ when the :class:`Mailbox` instance was initialized.
+
+
+- .. method:: get(key[, default=None])
++ .. method:: get(key, default=None)
+ __getitem__(key)
+
+ Return a representation of the message corresponding to *key*. If no such
+@@ -278,7 +278,7 @@
+ ^^^^^^^^^^^^^^^^
+
+
+-.. class:: Maildir(dirname[, factory=rfc822.Message[, create=True]])
++.. class:: Maildir(dirname, factory=rfc822.Message, create=True)
+
+ A subclass of :class:`Mailbox` for mailboxes in Maildir format. Parameter
+ *factory* is a callable object that accepts a file-like message representation
+@@ -423,7 +423,7 @@
+ ^^^^^^^^^^^^^
+
+
+-.. class:: mbox(path[, factory=None[, create=True]])
++.. class:: mbox(path, factory=None, create=True)
+
+ A subclass of :class:`Mailbox` for mailboxes in mbox format. Parameter *factory*
+ is a callable object that accepts a file-like message representation (which
+@@ -483,7 +483,7 @@
+ ^^^^^^^^^^^
+
+
+-.. class:: MH(path[, factory=None[, create=True]])
++.. class:: MH(path, factory=None, create=True)
+
+ A subclass of :class:`Mailbox` for mailboxes in MH format. Parameter *factory*
+ is a callable object that accepts a file-like message representation (which
+@@ -613,7 +613,7 @@
+ ^^^^^^^^^^^^^^
+
+
+-.. class:: Babyl(path[, factory=None[, create=True]])
++.. class:: Babyl(path, factory=None, create=True)
+
+ A subclass of :class:`Mailbox` for mailboxes in Babyl format. Parameter
+ *factory* is a callable object that accepts a file-like message representation
+@@ -689,7 +689,7 @@
+ ^^^^^^^^^^^^^
+
+
+-.. class:: MMDF(path[, factory=None[, create=True]])
++.. class:: MMDF(path, factory=None, create=True)
+
+ A subclass of :class:`Mailbox` for mailboxes in MMDF format. Parameter *factory*
+ is a callable object that accepts a file-like message representation (which
+@@ -987,7 +987,7 @@
+ are excluded.
+
+
+- .. method:: set_from(from_[, time_=None])
++ .. method:: set_from(from_, time_=None)
+
+ Set the "From " line to *from_*, which should be specified without a
+ leading "From " or trailing newline. For convenience, *time_* may be
+@@ -1358,7 +1358,7 @@
+ are excluded.
+
+
+- .. method:: set_from(from_[, time_=None])
++ .. method:: set_from(from_, time_=None)
+
+ Set the "From " line to *from_*, which should be specified without a
+ leading "From " or trailing newline. For convenience, *time_* may be
+@@ -1513,7 +1513,7 @@
+ mailboxes, such as adding or removing message, and do not provide classes to
+ represent format-specific message properties. For backward compatibility, the
+ older mailbox classes are still available, but the newer classes should be used
+-in preference to them. The old classes will be removed in Python 3.0.
++in preference to them. The old classes have been removed in Python 3.
+
+ Older mailbox objects support only iteration and provide a single public method:
+
+diff -r 70274d53c1dd Doc/library/markup.rst
+--- a/Doc/library/markup.rst
++++ b/Doc/library/markup.rst
+@@ -1,4 +1,3 @@
+-
+ .. _markup:
+
+ **********************************
+@@ -26,7 +25,7 @@
+ htmlparser.rst
+ sgmllib.rst
+ htmllib.rst
+- pyexpat.rst
++ xml.etree.elementtree.rst
+ xml.dom.rst
+ xml.dom.minidom.rst
+ xml.dom.pulldom.rst
+@@ -34,4 +33,4 @@
+ xml.sax.handler.rst
+ xml.sax.utils.rst
+ xml.sax.reader.rst
+- xml.etree.elementtree.rst
++ pyexpat.rst
+diff -r 70274d53c1dd Doc/library/mhlib.rst
+--- a/Doc/library/mhlib.rst
++++ b/Doc/library/mhlib.rst
+@@ -6,7 +6,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`mhlib` module has been removed in Python 3.0. Use the
++ The :mod:`mhlib` module has been removed in Python 3. Use the
+ :mod:`mailbox` instead.
+
+ .. sectionauthor:: Skip Montanaro <skip@pobox.com>
+diff -r 70274d53c1dd Doc/library/multiprocessing.rst
+--- a/Doc/library/multiprocessing.rst
++++ b/Doc/library/multiprocessing.rst
+@@ -81,7 +81,8 @@
+ def info(title):
+ print title
+ print 'module name:', __name__
+- print 'parent process:', os.getppid()
++ if hasattr(os, 'getppid'): # only available on Unix
++ print 'parent process:', os.getppid()
+ print 'process id:', os.getpid()
+
+ def f(name):
+@@ -107,7 +108,7 @@
+
+ **Queues**
+
+- The :class:`Queue` class is a near clone of :class:`Queue.Queue`. For
++ The :class:`~multiprocessing.Queue` class is a near clone of :class:`Queue.Queue`. For
+ example::
+
+ from multiprocessing import Process, Queue
+@@ -231,7 +232,7 @@
+ A manager returned by :func:`Manager` will support types :class:`list`,
+ :class:`dict`, :class:`Namespace`, :class:`Lock`, :class:`RLock`,
+ :class:`Semaphore`, :class:`BoundedSemaphore`, :class:`Condition`,
+- :class:`Event`, :class:`Queue`, :class:`Value` and :class:`Array`. For
++ :class:`Event`, :class:`~multiprocessing.Queue`, :class:`Value` and :class:`Array`. For
+ example, ::
+
+ from multiprocessing import Process, Manager
+@@ -464,9 +465,9 @@
+ For passing messages one can use :func:`Pipe` (for a connection between two
+ processes) or a queue (which allows multiple producers and consumers).
+
+-The :class:`Queue`, :class:`multiprocessing.queues.SimpleQueue` and :class:`JoinableQueue` types are multi-producer,
++The :class:`~multiprocessing.Queue`, :class:`multiprocessing.queues.SimpleQueue` and :class:`JoinableQueue` types are multi-producer,
+ multi-consumer FIFO queues modelled on the :class:`Queue.Queue` class in the
+-standard library. They differ in that :class:`Queue` lacks the
++standard library. They differ in that :class:`~multiprocessing.Queue` lacks the
+ :meth:`~Queue.Queue.task_done` and :meth:`~Queue.Queue.join` methods introduced
+ into Python 2.5's :class:`Queue.Queue` class.
+
+@@ -489,7 +490,7 @@
+ .. warning::
+
+ If a process is killed using :meth:`Process.terminate` or :func:`os.kill`
+- while it is trying to use a :class:`Queue`, then the data in the queue is
++ while it is trying to use a :class:`~multiprocessing.Queue`, then the data in the queue is
+ likely to become corrupted. This may cause any other process to get an
+ exception when it tries to use the queue later on.
+
+@@ -531,7 +532,7 @@
+ The usual :exc:`Queue.Empty` and :exc:`Queue.Full` exceptions from the
+ standard library's :mod:`Queue` module are raised to signal timeouts.
+
+- :class:`Queue` implements all the methods of :class:`Queue.Queue` except for
++ :class:`~multiprocessing.Queue` implements all the methods of :class:`Queue.Queue` except for
+ :meth:`~Queue.Queue.task_done` and :meth:`~Queue.Queue.join`.
+
+ .. method:: qsize()
+@@ -582,7 +583,7 @@
+
+ Equivalent to ``get(False)``.
+
+- :class:`multiprocessing.Queue` has a few additional methods not found in
++ :class:`~multiprocessing.Queue` has a few additional methods not found in
+ :class:`Queue.Queue`. These methods are usually unnecessary for most
+ code:
+
+@@ -612,7 +613,7 @@
+
+ .. class:: multiprocessing.queues.SimpleQueue()
+
+- It is a simplified :class:`Queue` type, very close to a locked :class:`Pipe`.
++ It is a simplified :class:`~multiprocessing.Queue` type, very close to a locked :class:`Pipe`.
+
+ .. method:: empty()
+
+@@ -629,7 +630,7 @@
+
+ .. class:: JoinableQueue([maxsize])
+
+- :class:`JoinableQueue`, a :class:`Queue` subclass, is a queue which
++ :class:`JoinableQueue`, a :class:`~multiprocessing.Queue` subclass, is a queue which
+ additionally has :meth:`task_done` and :meth:`join` methods.
+
+ .. method:: task_done()
+@@ -2084,7 +2085,7 @@
+ Bear in mind that a process that has put items in a queue will wait before
+ terminating until all the buffered items are fed by the "feeder" thread to
+ the underlying pipe. (The child process can call the
+- :meth:`Queue.cancel_join_thread` method of the queue to avoid this behaviour.)
++ :meth:`~multiprocessing.Queue.cancel_join_thread` method of the queue to avoid this behaviour.)
+
+ This means that whenever you use a queue you need to make sure that all
+ items which have been put on the queue will eventually be removed before the
+diff -r 70274d53c1dd Doc/library/mutex.rst
+--- a/Doc/library/mutex.rst
++++ b/Doc/library/mutex.rst
+@@ -7,7 +7,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`mutex` module has been removed in Python 3.0.
++ The :mod:`mutex` module has been removed in Python 3.
+
+ .. sectionauthor:: Moshe Zadka <moshez@zadka.site.co.il>
+
+diff -r 70274d53c1dd Doc/library/new.rst
+--- a/Doc/library/new.rst
++++ b/Doc/library/new.rst
+@@ -6,7 +6,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`new` module has been removed in Python 3.0. Use the :mod:`types`
++ The :mod:`new` module has been removed in Python 3. Use the :mod:`types`
+ module's classes instead.
+
+ .. sectionauthor:: Moshe Zadka <moshez@zadka.site.co.il>
+diff -r 70274d53c1dd Doc/library/os.path.rst
+--- a/Doc/library/os.path.rst
++++ b/Doc/library/os.path.rst
+@@ -331,7 +331,7 @@
+
+ .. note::
+
+- This function is deprecated and has been removed in 3.0 in favor of
++ This function is deprecated and has been removed in Python 3 in favor of
+ :func:`os.walk`.
+
+
+diff -r 70274d53c1dd Doc/library/os.rst
+--- a/Doc/library/os.rst
++++ b/Doc/library/os.rst
+@@ -157,6 +157,20 @@
+
+ Availability: Unix.
+
++ .. note:: On Mac OS X, :func:`getgroups` behavior differs somewhat from
++ other Unix platforms. If the Python interpreter was built with a
++ deployment target of :const:`10.5` or earlier, :func:`getgroups` returns
++ the list of effective group ids associated with the current user process;
++ this list is limited to a system-defined number of entries, typically 16,
++ and may be modified by calls to :func:`setgroups` if suitably privileged.
++ If built with a deployment target greater than :const:`10.5`,
++ :func:`getgroups` returns the current group access list for the user
++ associated with the effective user id of the process; the group access
++ list may change over the lifetime of the process, it is not affected by
++ calls to :func:`setgroups`, and its length is not limited to 16. The
++ deployment target value, :const:`MACOSX_DEPLOYMENT_TARGET`, can be
++ obtained with :func:`sysconfig.get_config_var`.
++
+
+ .. function:: initgroups(username, gid)
+
+@@ -306,6 +320,10 @@
+
+ .. versionadded:: 2.2
+
++ .. note:: On Mac OS X, the length of *groups* may not exceed the
++ system-defined maximum number of effective group ids, typically 16.
++ See the documentation for :func:`getgroups` for cases where it may not
++ return the same group list set by calling setgroups().
+
+ .. function:: setpgrp()
+
+@@ -1163,7 +1181,7 @@
+ doesn't open the FIFO --- it just creates the rendezvous point.
+
+
+-.. function:: mknod(filename[, mode=0600, device])
++.. function:: mknod(filename[, mode=0600[, device=0]])
+
+ Create a filesystem node (file, device special file or named pipe) named
+ *filename*. *mode* specifies both the permissions to use and the type of node to
+@@ -1565,7 +1583,7 @@
+ Availability: Unix, Windows.
+
+
+-.. function:: walk(top[, topdown=True [, onerror=None[, followlinks=False]]])
++.. function:: walk(top, topdown=True, onerror=None, followlinks=False)
+
+ .. index::
+ single: directory; walking
+diff -r 70274d53c1dd Doc/library/ossaudiodev.rst
+--- a/Doc/library/ossaudiodev.rst
++++ b/Doc/library/ossaudiodev.rst
+@@ -275,7 +275,7 @@
+ simple calculations.
+
+
+-.. method:: oss_audio_device.setparameters(format, nchannels, samplerate [, strict=False])
++.. method:: oss_audio_device.setparameters(format, nchannels, samplerate[, strict=False])
+
+ Set the key audio sampling parameters---sample format, number of channels, and
+ sampling rate---in one method call. *format*, *nchannels*, and *samplerate*
+diff -r 70274d53c1dd Doc/library/parser.rst
+--- a/Doc/library/parser.rst
++++ b/Doc/library/parser.rst
+@@ -34,7 +34,7 @@
+ replaced by "ast"; this is a legacy from the time when there was no other
+ AST and has nothing to do with the AST found in Python 2.5. This is also the
+ reason for the functions' keyword arguments being called *ast*, not *st*.
+- The "ast" functions will be removed in Python 3.0.
++ The "ast" functions have been removed in Python 3.
+
+ There are a few things to note about this module which are important to making
+ use of the data structures created. This is not a tutorial on editing the parse
+@@ -200,7 +200,7 @@
+ information is omitted if the flag is false or omitted.
+
+
+-.. function:: compilest(ast[, filename='<syntax-tree>'])
++.. function:: compilest(ast, filename='<syntax-tree>')
+
+ .. index:: builtin: eval
+
+diff -r 70274d53c1dd Doc/library/pickletools.rst
+--- a/Doc/library/pickletools.rst
++++ b/Doc/library/pickletools.rst
+@@ -20,7 +20,7 @@
+ probably won't find the :mod:`pickletools` module relevant.
+
+
+-.. function:: dis(pickle[, out=None, memo=None, indentlevel=4])
++.. function:: dis(pickle, out=None, memo=None, indentlevel=4)
+
+ Outputs a symbolic disassembly of the pickle to the file-like object *out*,
+ defaulting to ``sys.stdout``. *pickle* can be a string or a file-like object.
+diff -r 70274d53c1dd Doc/library/plistlib.rst
+--- a/Doc/library/plistlib.rst
++++ b/Doc/library/plistlib.rst
+@@ -74,7 +74,7 @@
+
+
+
+-.. function:: readPlistFromResource(path[, restype='plst'[, resid=0]])
++.. function:: readPlistFromResource(path, restype='plst', resid=0)
+
+ Read a plist from the resource with type *restype* from the resource fork of
+ *path*. Availability: Mac OS X.
+@@ -84,7 +84,7 @@
+ In Python 3.x, this function has been removed.
+
+
+-.. function:: writePlistToResource(rootObject, path[, restype='plst'[, resid=0]])
++.. function:: writePlistToResource(rootObject, path, restype='plst', resid=0)
+
+ Write *rootObject* as a resource with type *restype* to the resource fork of
+ *path*. Availability: Mac OS X.
+diff -r 70274d53c1dd Doc/library/profile.rst
+--- a/Doc/library/profile.rst
++++ b/Doc/library/profile.rst
+@@ -298,7 +298,7 @@
+ :synopsis: Statistics object for use with the profiler.
+
+
+-.. class:: Stats(filename[, stream=sys.stdout[, ...]])
++.. class:: Stats(filename, stream=sys.stdout[, ...])
+
+ This class constructor creates an instance of a "statistics object" from a
+ *filename* (or set of filenames). :class:`Stats` objects are manipulated by
+@@ -607,13 +607,10 @@
+ best results with a custom timer, it might be necessary to hard-code it in the C
+ source of the internal :mod:`_lsprof` module.
+
++
+ .. rubric:: Footnotes
+
+-.. [#] Updated and converted to LaTeX by Guido van Rossum. Further updated by Armin
+- Rigo to integrate the documentation for the new :mod:`cProfile` module of Python
+- 2.5.
+-
+-.. [#] Prior to Python 2.2, it was necessary to edit the profiler source code to embed
+- the bias as a literal number. You still can, but that method is no longer
++.. [#] Prior to Python 2.2, it was necessary to edit the profiler source code to
++ embed the bias as a literal number. You still can, but that method is no longer
+ described, because no longer needed.
+
+diff -r 70274d53c1dd Doc/library/pyclbr.rst
+--- a/Doc/library/pyclbr.rst
++++ b/Doc/library/pyclbr.rst
+@@ -19,7 +19,7 @@
+ modules.
+
+
+-.. function:: readmodule(module[, path=None])
++.. function:: readmodule(module, path=None)
+
+ Read a module and return a dictionary mapping class names to class
+ descriptor objects. The parameter *module* should be the name of a
+@@ -28,7 +28,7 @@
+ of ``sys.path``, which is used to locate module source code.
+
+
+-.. function:: readmodule_ex(module[, path=None])
++.. function:: readmodule_ex(module, path=None)
+
+ Like :func:`readmodule`, but the returned dictionary, in addition to
+ mapping class names to class descriptor objects, also maps top-level
+diff -r 70274d53c1dd Doc/library/queue.rst
+--- a/Doc/library/queue.rst
++++ b/Doc/library/queue.rst
+@@ -5,9 +5,9 @@
+ :synopsis: A synchronized queue class.
+
+ .. note::
+- The :mod:`Queue` module has been renamed to :mod:`queue` in Python 3.0. The
++ The :mod:`Queue` module has been renamed to :mod:`queue` in Python 3. The
+ :term:`2to3` tool will automatically adapt imports when converting your
+- sources to 3.0.
++ sources to Python 3.
+
+ **Source code:** :source:`Lib/Queue.py`
+
+@@ -20,8 +20,8 @@
+ availability of thread support in Python; see the :mod:`threading`
+ module.
+
+-Implements three types of queue whose only difference is the order that
+-the entries are retrieved. In a FIFO queue, the first tasks added are
++The module implements three types of queue, which differ only in the order in
++which the entries are retrieved. In a FIFO queue, the first tasks added are
+ the first retrieved. In a LIFO queue, the most recently added entry is
+ the first retrieved (operating like a stack). With a priority queue,
+ the entries are kept sorted (using the :mod:`heapq` module) and the
+diff -r 70274d53c1dd Doc/library/random.rst
+--- a/Doc/library/random.rst
++++ b/Doc/library/random.rst
+@@ -90,7 +90,7 @@
+
+ *state* should have been obtained from a previous call to :func:`getstate`, and
+ :func:`setstate` restores the internal state of the generator to what it was at
+- the time :func:`setstate` was called.
++ the time :func:`getstate` was called.
+
+ .. versionadded:: 2.1
+
+diff -r 70274d53c1dd Doc/library/re.rst
+--- a/Doc/library/re.rst
++++ b/Doc/library/re.rst
+@@ -273,7 +273,7 @@
+ lookbehind will back up 3 characters and check if the contained pattern matches.
+ The contained pattern must only match strings of some fixed length, meaning that
+ ``abc`` or ``a|b`` are allowed, but ``a*`` and ``a{3,4}`` are not. Note that
+- patterns which start with positive lookbehind assertions will never match at the
++ patterns which start with positive lookbehind assertions will not match at the
+ beginning of the string being searched; you will most likely want to use the
+ :func:`search` function rather than the :func:`match` function:
+
+@@ -325,14 +325,19 @@
+ Matches the empty string, but only at the beginning or end of a word. A word is
+ defined as a sequence of alphanumeric or underscore characters, so the end of a
+ word is indicated by whitespace or a non-alphanumeric, non-underscore character.
+- Note that ``\b`` is defined as the boundary between ``\w`` and ``\W``, so the
+- precise set of characters deemed to be alphanumeric depends on the values of the
+- ``UNICODE`` and ``LOCALE`` flags. Inside a character range, ``\b`` represents
+- the backspace character, for compatibility with Python's string literals.
++ Note that formally, ``\b`` is defined as the boundary between a ``\w`` and
++ a ``\W`` character (or vice versa), or between ``\w`` and the beginning/end
++ of the string, so the precise set of characters deemed to be alphanumeric
++ depends on the values of the ``UNICODE`` and ``LOCALE`` flags.
++ For example, ``r'\bfoo\b'`` matches ``'foo'``, ``'foo.'``, ``'(foo)'``,
++ ``'bar foo baz'`` but not ``'foobar'`` or ``'foo3'``.
++ Inside a character range, ``\b`` represents the backspace character, for compatibility with Python's string literals.
+
+ ``\B``
+ Matches the empty string, but only when it is *not* at the beginning or end of a
+- word. This is just the opposite of ``\b``, so is also subject to the settings
++ word. This means that ``r'py\B'`` matches ``'python'``, ``'py3'``, ``'py2'``,
++ but not ``'py'``, ``'py.'``, or ``'py!'``.
++ ``\B`` is just the opposite of ``\b``, so is also subject to the settings
+ of ``LOCALE`` and ``UNICODE``.
+
+ ``\d``
+@@ -348,20 +353,20 @@
+ character properties database.
+
+ ``\s``
+- When the :const:`LOCALE` and :const:`UNICODE` flags are not specified, matches
+- any whitespace character; this is equivalent to the set ``[ \t\n\r\f\v]``. With
+- :const:`LOCALE`, it will match this set plus whatever characters are defined as
+- space for the current locale. If :const:`UNICODE` is set, this will match the
+- characters ``[ \t\n\r\f\v]`` plus whatever is classified as space in the Unicode
+- character properties database.
++ When the :const:`UNICODE` flag is not specified, it matches any whitespace
++ character; this is equivalent to the set ``[ \t\n\r\f\v]``. The
++ :const:`LOCALE` flag has no extra effect on whitespace matching.
++ If :const:`UNICODE` is set, this will match the characters ``[ \t\n\r\f\v]``
++ plus whatever is classified as space in the Unicode character properties
++ database.
+
+ ``\S``
+- When the :const:`LOCALE` and :const:`UNICODE` flags are not specified, matches
+- any non-whitespace character; this is equivalent to the set ``[^ \t\n\r\f\v]``
+- With :const:`LOCALE`, it will match any character not in this set, and not
+- defined as space in the current locale. If :const:`UNICODE` is set, this will
+- match anything other than ``[ \t\n\r\f\v]`` and characters marked as space in
+- the Unicode character properties database.
++ When the :const:`UNICODE` flag is not specified, matches any non-whitespace
++ character; this is equivalent to the set ``[^ \t\n\r\f\v]``. The
++ :const:`LOCALE` flag has no extra effect on non-whitespace matching. If
++ :const:`UNICODE` is set, then any character not marked as space in the
++ Unicode character properties database is matched.
++
+
+ ``\w``
+ When the :const:`LOCALE` and :const:`UNICODE` flags are not specified, matches
+@@ -376,12 +381,16 @@
+ any non-alphanumeric character; this is equivalent to the set ``[^a-zA-Z0-9_]``.
+ With :const:`LOCALE`, it will match any character not in the set ``[0-9_]``, and
+ not defined as alphanumeric for the current locale. If :const:`UNICODE` is set,
+- this will match anything other than ``[0-9_]`` and characters marked as
+- alphanumeric in the Unicode character properties database.
++ this will match anything other than ``[0-9_]`` plus characters classified as
++ not alphanumeric in the Unicode character properties database.
+
+ ``\Z``
+ Matches only at the end of the string.
+
++If both :const:`LOCALE` and :const:`UNICODE` flags are included for a
++particular sequence, then the :const:`LOCALE` flag takes effect first, followed
++by the :const:`UNICODE` flag.
++
+ Most of the standard escapes supported by Python string literals are also
+ accepted by the regular expression parser::
+
+@@ -389,37 +398,15 @@
+ \r \t \v \x
+ \\
+
++(Note that ``\b`` is used to represent word boundaries, and means "backspace"
++only inside character classes.)
++
+ Octal escapes are included in a limited form: If the first digit is a 0, or if
+ there are three octal digits, it is considered an octal escape. Otherwise, it is
+ a group reference. As for string literals, octal escapes are always at most
+ three digits in length.
+
+
+-.. _matching-searching:
+-
+-Matching vs Searching
+----------------------
+-
+-.. sectionauthor:: Fred L. Drake, Jr. <fdrake@acm.org>
+-
+-
+-Python offers two different primitive operations based on regular expressions:
+-**match** checks for a match only at the beginning of the string, while
+-**search** checks for a match anywhere in the string (this is what Perl does
+-by default).
+-
+-Note that match may differ from search even when using a regular expression
+-beginning with ``'^'``: ``'^'`` matches only at the start of the string, or in
+-:const:`MULTILINE` mode also immediately following a newline. The "match"
+-operation succeeds only if the pattern matches at the start of the string
+-regardless of mode, or at the starting position given by the optional *pos*
+-argument regardless of whether a newline precedes it.
+-
+- >>> re.match("c", "abcdef") # No match
+- >>> re.search("c", "abcdef") # Match
+- <_sre.SRE_Match object at ...>
+-
+-
+ .. _contents-of-module-re:
+
+ Module Contents
+@@ -542,10 +529,11 @@
+ Return ``None`` if the string does not match the pattern; note that this is
+ different from a zero-length match.
+
+- .. note::
++ Note that even in :const:`MULTILINE` mode, :func:`re.match` will only match
++ at the beginning of the string and not at the beginning of each line.
+
+- If you want to locate a match anywhere in *string*, use :func:`search`
+- instead.
++ If you want to locate a match anywhere in *string*, use :func:`search`
++ instead (see also :ref:`search-vs-match`).
+
+
+ .. function:: split(pattern, string, maxsplit=0, flags=0)
+@@ -741,16 +729,14 @@
+ The optional *pos* and *endpos* parameters have the same meaning as for the
+ :meth:`~RegexObject.search` method.
+
+- .. note::
+-
+- If you want to locate a match anywhere in *string*, use
+- :meth:`~RegexObject.search` instead.
+-
+ >>> pattern = re.compile("o")
+ >>> pattern.match("dog") # No match as "o" is not at the start of "dog".
+ >>> pattern.match("dog", 1) # Match as "o" is the 2nd character of "dog".
+ <_sre.SRE_Match object at ...>
+
++ If you want to locate a match anywhere in *string*, use
++ :meth:`~RegexObject.search` instead (see also :ref:`search-vs-match`).
++
+
+ .. method:: RegexObject.split(string, maxsplit=0)
+
+@@ -783,8 +769,8 @@
+
+ .. attribute:: RegexObject.flags
+
+- The flags argument used when the RE object was compiled, or ``0`` if no flags
+- were provided.
++ The regex matching flags. This is a combination of the flags given to
++ :func:`.compile` and any ``(?...)`` inline flags in the pattern.
+
+
+ .. attribute:: RegexObject.groups
+@@ -1072,13 +1058,13 @@
+ +--------------------------------+---------------------------------------------+
+ | ``%i`` | ``[-+]?(0[xX][\dA-Fa-f]+|0[0-7]*|\d+)`` |
+ +--------------------------------+---------------------------------------------+
+-| ``%o`` | ``0[0-7]*`` |
++| ``%o`` | ``[-+]?[0-7]+`` |
+ +--------------------------------+---------------------------------------------+
+ | ``%s`` | ``\S+`` |
+ +--------------------------------+---------------------------------------------+
+ | ``%u`` | ``\d+`` |
+ +--------------------------------+---------------------------------------------+
+-| ``%x``, ``%X`` | ``0[xX][\dA-Fa-f]+`` |
++| ``%x``, ``%X`` | ``[-+]?(0[xX])?[\dA-Fa-f]+`` |
+ +--------------------------------+---------------------------------------------+
+
+ To extract the filename and numbers from a string like ::
+@@ -1094,59 +1080,39 @@
+ (\S+) - (\d+) errors, (\d+) warnings
+
+
+-Avoiding recursion
+-^^^^^^^^^^^^^^^^^^
+-
+-If you create regular expressions that require the engine to perform a lot of
+-recursion, you may encounter a :exc:`RuntimeError` exception with the message
+-``maximum recursion limit`` exceeded. For example, ::
+-
+- >>> s = 'Begin ' + 1000*'a very long string ' + 'end'
+- >>> re.match('Begin (\w| )*? end', s).end()
+- Traceback (most recent call last):
+- File "<stdin>", line 1, in ?
+- File "/usr/local/lib/python2.5/re.py", line 132, in match
+- return _compile(pattern, flags).match(string)
+- RuntimeError: maximum recursion limit exceeded
+-
+-You can often restructure your regular expression to avoid recursion.
+-
+-Starting with Python 2.3, simple uses of the ``*?`` pattern are special-cased to
+-avoid recursion. Thus, the above regular expression can avoid recursion by
+-being recast as ``Begin [a-zA-Z0-9_ ]*?end``. As a further benefit, such
+-regular expressions will run faster than their recursive equivalents.
+-
++.. _search-vs-match:
+
+ search() vs. match()
+ ^^^^^^^^^^^^^^^^^^^^
+
+-In a nutshell, :func:`match` only attempts to match a pattern at the beginning
+-of a string where :func:`search` will match a pattern anywhere in a string.
+-For example:
++.. sectionauthor:: Fred L. Drake, Jr. <fdrake@acm.org>
+
+- >>> re.match("o", "dog") # No match as "o" is not the first letter of "dog".
+- >>> re.search("o", "dog") # Match as search() looks everywhere in the string.
++Python offers two different primitive operations based on regular expressions:
++:func:`re.match` checks for a match only at the beginning of the string, while
++:func:`re.search` checks for a match anywhere in the string (this is what Perl
++does by default).
++
++For example::
++
++ >>> re.match("c", "abcdef") # No match
++ >>> re.search("c", "abcdef") # Match
+ <_sre.SRE_Match object at ...>
+
+-.. note::
++Regular expressions beginning with ``'^'`` can be used with :func:`search` to
++restrict the match to the beginning of the string::
+
+- The following applies only to regular expression objects like those created
+- with ``re.compile("pattern")``, not the primitives ``re.match(pattern,
+- string)`` or ``re.search(pattern, string)``.
++ >>> re.match("c", "abcdef") # No match
++ >>> re.search("^c", "abcdef") # No match
++ >>> re.search("^a", "abcdef") # Match
++ <_sre.SRE_Match object at ...>
+
+-:func:`match` has an optional second parameter that gives an index in the string
+-where the search is to start::
++Note however that in :const:`MULTILINE` mode :func:`match` only matches at the
++beginning of the string, whereas using :func:`search` with a regular expression
++beginning with ``'^'`` will match at the beginning of each line.
+
+- >>> pattern = re.compile("o")
+- >>> pattern.match("dog") # No match as "o" is not at the start of "dog."
+-
+- # Equivalent to the above expression as 0 is the default starting index:
+- >>> pattern.match("dog", 0)
+-
+- # Match as "o" is the 2nd character of "dog" (index 0 is the first):
+- >>> pattern.match("dog", 1)
++ >>> re.match('X', 'A\nB\nX', re.MULTILINE) # No match
++ >>> re.search('^X', 'A\nB\nX', re.MULTILINE) # Match
+ <_sre.SRE_Match object at ...>
+- >>> pattern.match("dog", 2) # No match as "o" is not the 3rd character of "dog."
+
+
+ Making a Phonebook
+@@ -1160,7 +1126,7 @@
+ First, here is the input. Normally it may come from a file, here we are using
+ triple-quoted string syntax:
+
+- >>> input = """Ross McFluff: 834.345.1254 155 Elm Street
++ >>> text = """Ross McFluff: 834.345.1254 155 Elm Street
+ ...
+ ... Ronald Heathmore: 892.345.3428 436 Finley Avenue
+ ... Frank Burger: 925.541.7625 662 South Dogwood Way
+@@ -1174,7 +1140,7 @@
+ .. doctest::
+ :options: +NORMALIZE_WHITESPACE
+
+- >>> entries = re.split("\n+", input)
++ >>> entries = re.split("\n+", text)
+ >>> entries
+ ['Ross McFluff: 834.345.1254 155 Elm Street',
+ 'Ronald Heathmore: 892.345.3428 436 Finley Avenue',
+diff -r 70274d53c1dd Doc/library/repr.rst
+--- a/Doc/library/repr.rst
++++ b/Doc/library/repr.rst
+@@ -6,9 +6,9 @@
+ .. sectionauthor:: Fred L. Drake, Jr. <fdrake@acm.org>
+
+ .. note::
+- The :mod:`repr` module has been renamed to :mod:`reprlib` in Python 3.0. The
++ The :mod:`repr` module has been renamed to :mod:`reprlib` in Python 3. The
+ :term:`2to3` tool will automatically adapt imports when converting your
+- sources to 3.0.
++ sources to Python 3.
+
+ **Source code:** :source:`Lib/repr.py`
+
+diff -r 70274d53c1dd Doc/library/rexec.rst
+--- a/Doc/library/rexec.rst
++++ b/Doc/library/rexec.rst
+@@ -6,7 +6,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`rexec` module has been removed in Python 3.0.
++ The :mod:`rexec` module has been removed in Python 3.
+
+ .. versionchanged:: 2.3
+ Disabled module.
+diff -r 70274d53c1dd Doc/library/rfc822.rst
+--- a/Doc/library/rfc822.rst
++++ b/Doc/library/rfc822.rst
+@@ -10,7 +10,7 @@
+ .. deprecated:: 2.3
+ The :mod:`email` package should be used in preference to the :mod:`rfc822`
+ module. This module is present only to maintain backward compatibility, and
+- has been removed in 3.0.
++ has been removed in Python 3.
+
+ This module defines a class, :class:`Message`, which represents an "email
+ message" as defined by the Internet standard :rfc:`2822`. [#]_ Such messages
+diff -r 70274d53c1dd Doc/library/robotparser.rst
+--- a/Doc/library/robotparser.rst
++++ b/Doc/library/robotparser.rst
+@@ -16,9 +16,9 @@
+
+ .. note::
+ The :mod:`robotparser` module has been renamed :mod:`urllib.robotparser` in
+- Python 3.0.
++ Python 3.
+ The :term:`2to3` tool will automatically adapt imports when converting
+- your sources to 3.0.
++ your sources to Python 3.
+
+ This module provides a single class, :class:`RobotFileParser`, which answers
+ questions about whether or not a particular user agent can fetch a URL on the
+diff -r 70274d53c1dd Doc/library/scrolledtext.rst
+--- a/Doc/library/scrolledtext.rst
++++ b/Doc/library/scrolledtext.rst
+@@ -16,8 +16,8 @@
+ .. note::
+
+ :mod:`ScrolledText` has been renamed to :mod:`tkinter.scrolledtext` in Python
+- 3.0. The :term:`2to3` tool will automatically adapt imports when converting
+- your sources to 3.0.
++ 3. The :term:`2to3` tool will automatically adapt imports when converting
++ your sources to Python 3.
+
+ The text widget and scrollbar are packed together in a :class:`Frame`, and the
+ methods of the :class:`Grid` and :class:`Pack` geometry managers are acquired
+diff -r 70274d53c1dd Doc/library/sgmllib.rst
+--- a/Doc/library/sgmllib.rst
++++ b/Doc/library/sgmllib.rst
+@@ -6,7 +6,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`sgmllib` module has been removed in Python 3.0.
++ The :mod:`sgmllib` module has been removed in Python 3.
+
+ .. index:: single: SGML
+
+diff -r 70274d53c1dd Doc/library/shelve.rst
+--- a/Doc/library/shelve.rst
++++ b/Doc/library/shelve.rst
+@@ -18,7 +18,7 @@
+ lots of shared sub-objects. The keys are ordinary strings.
+
+
+-.. function:: open(filename[, flag='c'[, protocol=None[, writeback=False]]])
++.. function:: open(filename, flag='c', protocol=None, writeback=False)
+
+ Open a persistent dictionary. The filename specified is the base filename for
+ the underlying database. As a side-effect, an extension may be added to the
+@@ -100,7 +100,7 @@
+ implementation used.
+
+
+-.. class:: Shelf(dict[, protocol=None[, writeback=False]])
++.. class:: Shelf(dict, protocol=None, writeback=False)
+
+ A subclass of :class:`UserDict.DictMixin` which stores pickled values in the
+ *dict* object.
+@@ -118,7 +118,7 @@
+ memory and make sync and close take a long time.
+
+
+-.. class:: BsdDbShelf(dict[, protocol=None[, writeback=False]])
++.. class:: BsdDbShelf(dict, protocol=None, writeback=False)
+
+ A subclass of :class:`Shelf` which exposes :meth:`first`, :meth:`!next`,
+ :meth:`previous`, :meth:`last` and :meth:`set_location` which are available in
+@@ -129,7 +129,7 @@
+ the same interpretation as for the :class:`Shelf` class.
+
+
+-.. class:: DbfilenameShelf(filename[, flag='c'[, protocol=None[, writeback=False]]])
++.. class:: DbfilenameShelf(filename, flag='c', protocol=None, writeback=False)
+
+ A subclass of :class:`Shelf` which accepts a *filename* instead of a dict-like
+ object. The underlying file will be opened using :func:`anydbm.open`. By
+diff -r 70274d53c1dd Doc/library/shutil.rst
+--- a/Doc/library/shutil.rst
++++ b/Doc/library/shutil.rst
+@@ -31,6 +31,8 @@
+ are not copied.
+
+
++.. _file-operations:
++
+ Directory and files operations
+ ------------------------------
+
+@@ -94,7 +96,7 @@
+ .. versionadded:: 2.6
+
+
+-.. function:: copytree(src, dst[, symlinks=False[, ignore=None]])
++.. function:: copytree(src, dst, symlinks=False, ignore=None)
+
+ Recursively copy an entire directory tree rooted at *src*. The destination
+ directory, named by *dst*, must not already exist; it will be created as
+@@ -185,7 +187,7 @@
+ .. versionadded:: 2.3
+
+
+-.. _shutil-example:
++.. _copytree-example:
+
+ copytree example
+ ::::::::::::::::
+@@ -254,8 +256,13 @@
+ copytree(source, destination, ignore=_logpath)
+
+
+-Archives operations
+--------------------
++.. _archiving-operations:
++
++Archiving operations
++--------------------
++
++High-level utilities to create and read compressed and archived files are also
++provided. They rely on the :mod:`zipfile` and :mod:`tarfile` modules.
+
+ .. function:: make_archive(base_name, format, [root_dir, [base_dir, [verbose, [dry_run, [owner, [group, [logger]]]]]]])
+
+@@ -278,7 +285,8 @@
+ *owner* and *group* are used when creating a tar archive. By default,
+ uses the current owner and group.
+
+- *logger* is an instance of :class:`logging.Logger`.
++ *logger* must be an object compatible with :pep:`282`, usually an instance of
++ :class:`logging.Logger`.
+
+ .. versionadded:: 2.7
+
+@@ -322,6 +330,8 @@
+ .. versionadded:: 2.7
+
+
++.. _archiving-example:
++
+ Archiving example
+ :::::::::::::::::
+
+@@ -346,5 +356,3 @@
+ -rw------- tarek/staff 1675 2008-06-09 13:26:54 ./id_rsa
+ -rw-r--r-- tarek/staff 397 2008-06-09 13:26:54 ./id_rsa.pub
+ -rw-r--r-- tarek/staff 37192 2010-02-06 18:23:10 ./known_hosts
+-
+-
+diff -r 70274d53c1dd Doc/library/simplehttpserver.rst
+--- a/Doc/library/simplehttpserver.rst
++++ b/Doc/library/simplehttpserver.rst
+@@ -8,8 +8,8 @@
+
+ .. note::
+ The :mod:`SimpleHTTPServer` module has been merged into :mod:`http.server` in
+- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
+- converting your sources to 3.0.
++ Python 3. The :term:`2to3` tool will automatically adapt imports when
++ converting your sources to Python 3.
+
+
+ The :mod:`SimpleHTTPServer` module defines a single class,
+diff -r 70274d53c1dd Doc/library/simplexmlrpcserver.rst
+--- a/Doc/library/simplexmlrpcserver.rst
++++ b/Doc/library/simplexmlrpcserver.rst
+@@ -8,8 +8,8 @@
+
+ .. note::
+ The :mod:`SimpleXMLRPCServer` module has been merged into
+- :mod:`xmlrpc.server` in Python 3.0. The :term:`2to3` tool will automatically
+- adapt imports when converting your sources to 3.0.
++ :mod:`xmlrpc.server` in Python 3. The :term:`2to3` tool will automatically
++ adapt imports when converting your sources to Python 3.
+
+
+ .. versionadded:: 2.2
+diff -r 70274d53c1dd Doc/library/socket.rst
+--- a/Doc/library/socket.rst
++++ b/Doc/library/socket.rst
+@@ -38,7 +38,7 @@
+ :const:`AF_UNIX` address family. A pair ``(host, port)`` is used for the
+ :const:`AF_INET` address family, where *host* is a string representing either a
+ hostname in Internet domain notation like ``'daring.cwi.nl'`` or an IPv4 address
+-like ``'100.50.200.5'``, and *port* is an integral port number. For
++like ``'100.50.200.5'``, and *port* is an integer. For
+ :const:`AF_INET6` address family, a four-tuple ``(host, port, flowinfo,
+ scopeid)`` is used, where *flowinfo* and *scopeid* represents ``sin6_flowinfo``
+ and ``sin6_scope_id`` member in :const:`struct sockaddr_in6` in C. For
+@@ -72,17 +72,17 @@
+ tuple, and the fields depend on the address type. The general tuple form is
+ ``(addr_type, v1, v2, v3 [, scope])``, where:
+
+- - *addr_type* is one of TIPC_ADDR_NAMESEQ, TIPC_ADDR_NAME, or
+- TIPC_ADDR_ID.
+- - *scope* is one of TIPC_ZONE_SCOPE, TIPC_CLUSTER_SCOPE, and
+- TIPC_NODE_SCOPE.
+- - If *addr_type* is TIPC_ADDR_NAME, then *v1* is the server type, *v2* is
++ - *addr_type* is one of :const:`TIPC_ADDR_NAMESEQ`, :const:`TIPC_ADDR_NAME`,
++ or :const:`TIPC_ADDR_ID`.
++ - *scope* is one of :const:`TIPC_ZONE_SCOPE`, :const:`TIPC_CLUSTER_SCOPE`,
++ and :const:`TIPC_NODE_SCOPE`.
++ - If *addr_type* is :const:`TIPC_ADDR_NAME`, then *v1* is the server type, *v2* is
+ the port identifier, and *v3* should be 0.
+
+- If *addr_type* is TIPC_ADDR_NAMESEQ, then *v1* is the server type, *v2*
++ If *addr_type* is :const:`TIPC_ADDR_NAMESEQ`, then *v1* is the server type, *v2*
+ is the lower port number, and *v3* is the upper port number.
+
+- If *addr_type* is TIPC_ADDR_ID, then *v1* is the node, *v2* is the
++ If *addr_type* is :const:`TIPC_ADDR_ID`, then *v1* is the node, *v2* is the
+ reference, and *v3* should be set to 0.
+
+
+diff -r 70274d53c1dd Doc/library/socketserver.rst
+--- a/Doc/library/socketserver.rst
++++ b/Doc/library/socketserver.rst
+@@ -7,8 +7,8 @@
+ .. note::
+
+ The :mod:`SocketServer` module has been renamed to :mod:`socketserver` in
+- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
+- converting your sources to 3.0.
++ Python 3. The :term:`2to3` tool will automatically adapt imports when
++ converting your sources to Python 3.
+
+ **Source code:** :source:`Lib/SocketServer.py`
+
+diff -r 70274d53c1dd Doc/library/sqlite3.rst
+--- a/Doc/library/sqlite3.rst
++++ b/Doc/library/sqlite3.rst
+@@ -3,7 +3,7 @@
+
+ .. module:: sqlite3
+ :synopsis: A DB-API 2.0 implementation using SQLite 3.x.
+-.. sectionauthor:: Gerhard Häring <gh@ghaering.de>
++.. sectionauthor:: Gerhard Häring <gh@ghaering.de>
+
+
+ .. versionadded:: 2.5
+@@ -15,15 +15,15 @@
+ application using SQLite and then port the code to a larger database such as
+ PostgreSQL or Oracle.
+
+-sqlite3 was written by Gerhard Häring and provides a SQL interface compliant
+-with the DB-API 2.0 specification described by :pep:`249`.
++The sqlite3 module was written by Gerhard Häring. It provides a SQL interface
++compliant with the DB-API 2.0 specification described by :pep:`249`.
+
+ To use the module, you must first create a :class:`Connection` object that
+ represents the database. Here the data will be stored in the
+-:file:`/tmp/example` file::
++:file:`example.db` file::
+
+ import sqlite3
+- conn = sqlite3.connect('/tmp/example')
++ conn = sqlite3.connect('example.db')
+
+ You can also supply the special name ``:memory:`` to create a database in RAM.
+
+@@ -33,13 +33,11 @@
+ c = conn.cursor()
+
+ # Create table
+- c.execute('''create table stocks
+- (date text, trans text, symbol text,
+- qty real, price real)''')
++ c.execute('''CREATE TABLE stocks
++ (date text, trans text, symbol text, qty real, price real)''')
+
+ # Insert a row of data
+- c.execute("""insert into stocks
+- values ('2006-01-05','BUY','RHAT',100,35.14)""")
++ c.execute("INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14)")
+
+ # Save (commit) the changes
+ conn.commit()
+@@ -47,9 +45,16 @@
+ # We can also close the cursor if we are done with it
+ c.close()
+
++The data you've saved is persistent and is available in subsequent sessions::
++
++ import sqlite3
++ conn = sqlite3.connect('example.db')
++ c = conn.cursor()
++
+ Usually your SQL operations will need to use values from Python variables. You
+ shouldn't assemble your query using Python's string operations because doing so
+-is insecure; it makes your program vulnerable to an SQL injection attack.
++is insecure; it makes your program vulnerable to an SQL injection attack
++(see http://xkcd.com/327/ for a humorous example of what can go wrong).
+
+ Instead, use the DB-API's parameter substitution. Put ``?`` as a placeholder
+ wherever you want to use a value, and then provide a tuple of values as the
+@@ -58,19 +63,20 @@
+ example::
+
+ # Never do this -- insecure!
+- symbol = 'IBM'
+- c.execute("select * from stocks where symbol = '%s'" % symbol)
++ symbol = 'RHAT'
++ c.execute("SELECT * FROM stocks WHERE symbol = '%s'" % symbol)
+
+ # Do this instead
+ t = (symbol,)
+- c.execute('select * from stocks where symbol=?', t)
++ c.execute('SELECT * FROM stocks WHERE symbol=?', t)
++ print c.fetchone()
+
+- # Larger example
+- for t in [('2006-03-28', 'BUY', 'IBM', 1000, 45.00),
+- ('2006-04-05', 'BUY', 'MSFT', 1000, 72.00),
+- ('2006-04-06', 'SELL', 'IBM', 500, 53.00),
+- ]:
+- c.execute('insert into stocks values (?,?,?,?,?)', t)
++ # Larger example that inserts many records at a time
++ purchases = [('2006-03-28', 'BUY', 'IBM', 1000, 45.00),
++ ('2006-04-05', 'BUY', 'MSFT', 1000, 72.00),
++ ('2006-04-06', 'SELL', 'IBM', 500, 53.00),
++ ]
++ c.executemany('INSERT INTO stocks VALUES (?,?,?,?,?)', purchases)
+
+ To retrieve data after executing a SELECT statement, you can either treat the
+ cursor as an :term:`iterator`, call the cursor's :meth:`~Cursor.fetchone` method to
+@@ -79,16 +85,13 @@
+
+ This example uses the iterator form::
+
+- >>> c = conn.cursor()
+- >>> c.execute('select * from stocks order by price')
+- >>> for row in c:
+- ... print row
+- ...
++ >>> for row in c.execute('SELECT * FROM stocks ORDER BY price'):
++ print row
++
+ (u'2006-01-05', u'BUY', u'RHAT', 100, 35.14)
+ (u'2006-03-28', u'BUY', u'IBM', 1000, 45.0)
+ (u'2006-04-06', u'SELL', u'IBM', 500, 53.0)
+ (u'2006-04-05', u'BUY', u'MSFT', 1000, 72.0)
+- >>>
+
+
+ .. seealso::
+@@ -101,6 +104,9 @@
+ The SQLite web page; the documentation describes the syntax and the
+ available data types for the supported SQL dialect.
+
++ http://www.w3schools.com/sql/
++ Tutorial, reference and examples for learning SQL syntax.
++
+ :pep:`249` - Database API Specification 2.0
+ PEP written by Marc-André Lemburg.
+
+@@ -237,7 +243,6 @@
+ supplied, this must be a custom cursor class that extends
+ :class:`sqlite3.Cursor`.
+
+-
+ .. method:: Connection.commit()
+
+ This method commits the current transaction. If you don't call this method,
+@@ -357,8 +362,6 @@
+
+ .. method:: Connection.set_progress_handler(handler, n)
+
+- .. versionadded:: 2.6
+-
+ This routine registers a callback. The callback is invoked for every *n*
+ instructions of the SQLite virtual machine. This is useful if you want to
+ get called from SQLite during long-running operations, for example to update
+@@ -367,29 +370,31 @@
+ If you want to clear any previously installed progress handler, call the
+ method with :const:`None` for *handler*.
+
++ .. versionadded:: 2.6
++
+
+ .. method:: Connection.enable_load_extension(enabled)
+
+- .. versionadded:: 2.7
+-
+ This routine allows/disallows the SQLite engine to load SQLite extensions
+ from shared libraries. SQLite extensions can define new functions,
+ aggregates or whole new virtual table implementations. One well-known
+ extension is the fulltext-search extension distributed with SQLite.
+
++ Loadable extensions are disabled by default. See [#f1]_.
++
++ .. versionadded:: 2.7
++
+ .. literalinclude:: ../includes/sqlite3/load_extension.py
+
+- Loadable extensions are disabled by default. See [#f1]_
+-
+ .. method:: Connection.load_extension(path)
+
+- .. versionadded:: 2.7
+-
+ This routine loads a SQLite extension from a shared library. You have to
+ enable extension loading with :meth:`enable_load_extension` before you can
+ use this routine.
+
+- Loadable extensions are disabled by default. See [#f1]_
++ Loadable extensions are disabled by default. See [#f1]_.
++
++ .. versionadded:: 2.7
+
+ .. attribute:: Connection.row_factory
+
+@@ -468,19 +473,15 @@
+
+ .. method:: Cursor.execute(sql, [parameters])
+
+- Executes an SQL statement. The SQL statement may be parametrized (i. e.
++ Executes an SQL statement. The SQL statement may be parameterized (i.e.,
+ placeholders instead of SQL literals). The :mod:`sqlite3` module supports two
+ kinds of placeholders: question marks (qmark style) and named placeholders
+ (named style).
+
+- This example shows how to use parameters with qmark style:
++ Here's an example of both styles:
+
+ .. literalinclude:: ../includes/sqlite3/execute_1.py
+
+- This example shows how to use the named style:
+-
+- .. literalinclude:: ../includes/sqlite3/execute_2.py
+-
+ :meth:`execute` will only execute a single SQL statement. If you try to execute
+ more than one statement with it, it will raise a Warning. Use
+ :meth:`executescript` if you want to execute multiple SQL statements with one
+@@ -633,7 +634,8 @@
+ ['date', 'trans', 'symbol', 'qty', 'price']
+ >>> r['qty']
+ 100.0
+- >>> for member in r: print member
++ >>> for member in r:
++ ... print member
+ ...
+ 2006-01-05
+ BUY
+diff -r 70274d53c1dd Doc/library/statvfs.rst
+--- a/Doc/library/statvfs.rst
++++ b/Doc/library/statvfs.rst
+@@ -6,7 +6,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`statvfs` module has been deprecated for removal in Python 3.0.
++ The :mod:`statvfs` module has been removed in Python 3.
+
+
+ .. sectionauthor:: Moshe Zadka <moshez@zadka.site.co.il>
+diff -r 70274d53c1dd Doc/library/stdtypes.rst
+--- a/Doc/library/stdtypes.rst
++++ b/Doc/library/stdtypes.rst
+@@ -969,7 +969,7 @@
+ See :ref:`formatstrings` for a description of the various formatting options
+ that can be specified in format strings.
+
+- This method of string formatting is the new standard in Python 3.0, and
++ This method of string formatting is the new standard in Python 3, and
+ should be preferred to the ``%`` formatting described in
+ :ref:`string-formatting` in new code.
+
+@@ -1161,8 +1161,8 @@
+ Return a list of the words in the string, using *sep* as the delimiter
+ string. If *maxsplit* is given, at most *maxsplit* splits are done (thus,
+ the list will have at most ``maxsplit+1`` elements). If *maxsplit* is not
+- specified, then there is no limit on the number of splits (all possible
+- splits are made).
++ specified or ``-1``, then there is no limit on the number of splits
++ (all possible splits are made).
+
+ If *sep* is given, consecutive delimiters are not grouped together and are
+ deemed to delimit empty strings (for example, ``'1,,2'.split(',')`` returns
+@@ -1183,9 +1183,18 @@
+
+ .. method:: str.splitlines([keepends])
+
+- Return a list of the lines in the string, breaking at line boundaries. Line
+- breaks are not included in the resulting list unless *keepends* is given and
+- true.
++ Return a list of the lines in the string, breaking at line boundaries.
++ This method uses the universal newlines approach to splitting lines.
++ Line breaks are not included in the resulting list unless *keepends* is
++ given and true.
++
++ For example, ``'ab c\n\nde fg\rkl\r\n'.splitlines()`` returns
++ ``['ab c', '', 'de fg', 'kl']``, while the same call with ``splitlines(True)``
++ returns ``['ab c\n', '\n', 'de fg\r', 'kl\r\n']``.
++
++ Unlike :meth:`~str.split` when a delimiter string *sep* is given, this
++ method returns an empty list for the empty string, and a terminal line
++ break does not result in an extra line.
+
+
+ .. method:: str.startswith(prefix[, start[, end]])
+@@ -2378,7 +2387,7 @@
+ make a :keyword:`for` loop the most efficient way of looping over the lines of a
+ file (a very common operation), the :meth:`~file.next` method uses a hidden read-ahead
+ buffer. As a consequence of using a read-ahead buffer, combining :meth:`~file.next`
+- with other file methods (like :meth:`readline`) does not work right. However,
++ with other file methods (like :meth:`~file.readline`) does not work right. However,
+ using :meth:`seek` to reposition the file to an absolute position will flush the
+ read-ahead buffer.
+
+@@ -2420,7 +2429,7 @@
+
+ .. method:: file.readlines([sizehint])
+
+- Read until EOF using :meth:`readline` and return a list containing the lines
++ Read until EOF using :meth:`~file.readline` and return a list containing the lines
+ thus read. If the optional *sizehint* argument is present, instead of
+ reading up to EOF, whole lines totalling approximately *sizehint* bytes
+ (possibly after rounding up to an internal buffer size) are read. Objects
+@@ -2500,7 +2509,7 @@
+ add line separators.)
+
+ Files support the iterator protocol. Each iteration returns the same result as
+-``file.readline()``, and iteration ends when the :meth:`readline` method returns
++:meth:`~file.readline`, and iteration ends when the :meth:`~file.readline` method returns
+ an empty string.
+
+ File objects also offer a number of other interesting attributes. These are not
+diff -r 70274d53c1dd Doc/library/string.rst
+--- a/Doc/library/string.rst
++++ b/Doc/library/string.rst
+@@ -322,7 +322,7 @@
+
+ .. productionlist:: sf
+ format_spec: [[`fill`]`align`][`sign`][#][0][`width`][,][.`precision`][`type`]
+- fill: <a character other than '}'>
++ fill: <a character other than '{' or '}'>
+ align: "<" | ">" | "=" | "^"
+ sign: "+" | "-" | " "
+ width: `integer`
+@@ -793,7 +793,7 @@
+ The following list of functions are also defined as methods of string and
+ Unicode objects; see section :ref:`string-methods` for more information on
+ those. You should consider these functions as deprecated, although they will
+-not be removed until Python 3.0. The functions defined in this module are:
++not be removed until Python 3. The functions defined in this module are:
+
+
+ .. function:: atof(s)
+@@ -905,14 +905,15 @@
+
+ Return a list of the words of the string *s*. If the optional second argument
+ *sep* is absent or ``None``, the words are separated by arbitrary strings of
+- whitespace characters (space, tab, newline, return, formfeed). If the second
++ whitespace characters (space, tab, newline, return, formfeed). If the second
+ argument *sep* is present and not ``None``, it specifies a string to be used as
+ the word separator. The returned list will then have one more item than the
+- number of non-overlapping occurrences of the separator in the string. The
+- optional third argument *maxsplit* defaults to 0. If it is nonzero, at most
+- *maxsplit* number of splits occur, and the remainder of the string is returned
+- as the final element of the list (thus, the list will have at most
+- ``maxsplit+1`` elements).
++ number of non-overlapping occurrences of the separator in the string.
++ If *maxsplit* is given, at most *maxsplit* number of splits occur, and the
++ remainder of the string is returned as the final element of the list (thus,
++ the list will have at most ``maxsplit+1`` elements). If *maxsplit* is not
++ specified or ``-1``, then there is no limit on the number of splits (all
++ possible splits are made).
+
+ The behavior of split on an empty string depends on the value of *sep*. If *sep*
+ is not specified, or specified as ``None``, the result will be an empty list.
+@@ -925,7 +926,7 @@
+ Return a list of the words of the string *s*, scanning *s* from the end. To all
+ intents and purposes, the resulting list of words is the same as returned by
+ :func:`split`, except when the optional third argument *maxsplit* is explicitly
+- specified and nonzero. When *maxsplit* is nonzero, at most *maxsplit* number of
++ specified and nonzero. If *maxsplit* is given, at most *maxsplit* number of
+ splits -- the *rightmost* ones -- occur, and the remainder of the string is
+ returned as the first element of the list (thus, the list will have at most
+ ``maxsplit+1`` elements).
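
A tiny illustration of the *maxsplit* behaviour described above (illustrative only, not part of the patch):

    import string

    s = '1,2,3,4'
    print string.split(s, ',', 1)    # ['1', '2,3,4'] -- at most one split
    print string.split(s, ',', -1)   # ['1', '2', '3', '4'] -- -1 means no limit
    print string.split(s, ',')       # same as -1: all possible splits
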
+diff -r 70274d53c1dd Doc/library/struct.rst
+--- a/Doc/library/struct.rst
++++ b/Doc/library/struct.rst
+@@ -386,7 +386,7 @@
+ (``len(string)`` must equal :attr:`self.size`).
+
+
+- .. method:: unpack_from(buffer[, offset=0])
++ .. method:: unpack_from(buffer, offset=0)
+
+ Identical to the :func:`unpack_from` function, using the compiled format.
+ (``len(buffer[offset:])`` must be at least :attr:`self.size`).
+diff -r 70274d53c1dd Doc/library/subprocess.rst
+--- a/Doc/library/subprocess.rst
++++ b/Doc/library/subprocess.rst
+@@ -172,6 +172,26 @@
+ output.
+
+
++.. exception:: CalledProcessError
++
++ Exception raised when a process run by :func:`check_call` or
++ :func:`check_output` returns a non-zero exit status.
++
++ .. attribute:: returncode
++
++ Exit status of the child process.
++
++ .. attribute:: cmd
++
++ Command that was used to spawn the child process.
++
++ .. attribute:: output
++
++ Output of the child process if this exception is raised by
++ :func:`check_output`. Otherwise, ``None``.
++
++
++
+ .. _frequently-used-arguments:
+
+ Frequently Used Arguments
+@@ -289,7 +309,7 @@
+
+ Popen(['/bin/sh', '-c', args[0], args[1], ...])
+
+- On Windows: the :class:`Popen` class uses CreateProcess() to execute the child
++ On Windows: the :class:`Popen` class uses CreateProcess() to execute the
+ child program, which operates on strings. If *args* is a sequence, it will
+ be converted to a string in a manner described in
+ :ref:`converting-argument-sequence`.
+@@ -685,7 +705,7 @@
+ to receive a SIGPIPE if p2 exits before p1.
+
+ Alternatively, for trusted input, the shell's own pipeline support may still
+-be used directly:
++be used directly::
+
+ output=`dmesg | grep hda`
+ # becomes
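
To illustrate the CalledProcessError attributes documented in the subprocess hunk above, a minimal sketch (not part of the patch; the failing command is arbitrary):

    import subprocess

    try:
        # 'ls' on a path that does not exist exits with a non-zero status
        subprocess.check_output(["ls", "/no/such/path"], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print e.returncode   # exit status of the child process
        print e.cmd          # the command that was run
        print e.output       # captured output, since check_output raised it
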
+diff -r 70274d53c1dd Doc/library/sunaudio.rst
+--- a/Doc/library/sunaudio.rst
++++ b/Doc/library/sunaudio.rst
+@@ -8,7 +8,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`sunaudiodev` module has been deprecated for removal in Python 3.0.
++ The :mod:`sunaudiodev` module has been removed in Python 3.
+
+
+
+@@ -153,7 +153,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`SUNAUDIODEV` module has been deprecated for removal in Python 3.0.
++ The :mod:`SUNAUDIODEV` module has been removed in Python 3.
+
+
+
+diff -r 70274d53c1dd Doc/library/sys.rst
+--- a/Doc/library/sys.rst
++++ b/Doc/library/sys.rst
+@@ -1,4 +1,3 @@
+-
+ :mod:`sys` --- System-specific parameters and functions
+ =======================================================
+
+@@ -208,7 +207,7 @@
+ be set at build time with the ``--exec-prefix`` argument to the
+ :program:`configure` script. Specifically, all configuration files (e.g. the
+ :file:`pyconfig.h` header file) are installed in the directory
+- :file:`{exec_prefix}/lib/python{X.Y}/config', and shared library modules are
++ :file:`{exec_prefix}/lib/python{X.Y}/config`, and shared library modules are
+ installed in :file:`{exec_prefix}/lib/python{X.Y}/lib-dynload`, where *X.Y*
+ is the version number of Python, for example ``2.7``.
+
+@@ -291,6 +290,8 @@
+
+ .. versionadded:: 2.6
+
++ .. versionadded:: 2.7.3
++ The ``hash_randomization`` attribute.
+
+ .. data:: float_info
+
+@@ -775,7 +776,7 @@
+ argument to the :program:`configure` script. The main collection of Python
+ library modules is installed in the directory :file:`{prefix}/lib/python{X.Y}``
+ while the platform independent header files (all except :file:`pyconfig.h`) are
+- stored in :file:`{prefix}/include/python{X.Y}``, where *X.Y* is the version
++ stored in :file:`{prefix}/include/python{X.Y}`, where *X.Y* is the version
+ number of Python, for example ``2.7``.
+
+
+@@ -796,10 +797,10 @@
+
+ .. data:: py3kwarning
+
+- Bool containing the status of the Python 3.0 warning flag. It's ``True``
++ Bool containing the status of the Python 3 warning flag. It's ``True``
+ when Python is started with the -3 option. (This should be considered
+ read-only; setting it to a different value doesn't have an effect on
+- Python 3.0 warnings.)
++ Python 3 warnings.)
+
+ .. versionadded:: 2.6
+
+diff -r 70274d53c1dd Doc/library/sysconfig.rst
+--- a/Doc/library/sysconfig.rst
++++ b/Doc/library/sysconfig.rst
+@@ -129,7 +129,7 @@
+ one may call this function and get the default value.
+
+ If *scheme* is provided, it must be a value from the list returned by
+- :func:`get_path_names`. Otherwise, the default scheme for the current
++ :func:`get_scheme_names`. Otherwise, the default scheme for the current
+ platform is used.
+
+ If *vars* is provided, it must be a dictionary of variables that will update
+diff -r 70274d53c1dd Doc/library/syslog.rst
+--- a/Doc/library/syslog.rst
++++ b/Doc/library/syslog.rst
+@@ -73,7 +73,8 @@
+ Facilities:
+ :const:`LOG_KERN`, :const:`LOG_USER`, :const:`LOG_MAIL`, :const:`LOG_DAEMON`,
+ :const:`LOG_AUTH`, :const:`LOG_LPR`, :const:`LOG_NEWS`, :const:`LOG_UUCP`,
+- :const:`LOG_CRON` and :const:`LOG_LOCAL0` to :const:`LOG_LOCAL7`.
++ :const:`LOG_CRON`, :const:`LOG_SYSLOG` and :const:`LOG_LOCAL0` to
++ :const:`LOG_LOCAL7`.
+
+ Log options:
+ :const:`LOG_PID`, :const:`LOG_CONS`, :const:`LOG_NDELAY`, :const:`LOG_NOWAIT`
+diff -r 70274d53c1dd Doc/library/tarfile.rst
+--- a/Doc/library/tarfile.rst
++++ b/Doc/library/tarfile.rst
+@@ -16,7 +16,8 @@
+
+ The :mod:`tarfile` module makes it possible to read and write tar
+ archives, including those using gzip or bz2 compression.
+-(:file:`.zip` files can be read and written using the :mod:`zipfile` module.)
++Use the :mod:`zipfile` module to read or write :file:`.zip` files, or the
++higher-level functions in :ref:`shutil <archiving-operations>`.
+
+ Some facts and figures:
+
+@@ -142,7 +143,7 @@
+
+
+ .. deprecated:: 2.6
+- The :class:`TarFileCompat` class has been deprecated for removal in Python 3.0.
++ The :class:`TarFileCompat` class has been removed in Python 3.
+
+
+ .. exception:: TarError
+diff -r 70274d53c1dd Doc/library/test.rst
+--- a/Doc/library/test.rst
++++ b/Doc/library/test.rst
+@@ -169,10 +169,10 @@
+ the test passed or failed and thus minimize output.
+
+ Running :mod:`test.regrtest` directly allows what resources are available for
+-tests to use to be set. You do this by using the :option:`-u` command-line
+-option. Run :program:`python -m test.regrtest -uall` to turn on all
+-resources; specifying ``all`` as an option for ``-u`` enables all
+-possible resources. If all but one resource is desired (a more common case), a
++tests to use to be set. You do this by using the ``-u`` command-line
++option. Specifying ``all`` as the value for the ``-u`` option enables all
++possible resources: :program:`python -m test -uall`.
++If all but one resource is desired (a more common case), a
+ comma-separated list of resources that are not desired may be listed after
+ ``all``. The command :program:`python -m test.regrtest -uall,-audio,-largefile`
+ will run :mod:`test.regrtest` with all resources except the ``audio`` and
+@@ -380,7 +380,7 @@
+
+ with captured_stdout() as s:
+ print "hello"
+- assert s.getvalue() == "hello"
++ assert s.getvalue() == "hello\n"
+
+ .. versionadded:: 2.6
+
+diff -r 70274d53c1dd Doc/library/textwrap.rst
+--- a/Doc/library/textwrap.rst
++++ b/Doc/library/textwrap.rst
+@@ -112,9 +112,11 @@
+
+ .. attribute:: replace_whitespace
+
+- (default: ``True``) If true, each whitespace character (as defined by
+- ``string.whitespace``) remaining after tab expansion will be replaced by a
+- single space.
++ (default: ``True``) If true, after tab expansion but before wrapping,
++ the :meth:`wrap` method will replace each whitespace character
++ with a single space. The whitespace characters replaced are
++ as follows: tab, newline, vertical tab, formfeed, and carriage
++ return (``'\t\n\v\f\r'``).
+
+ .. note::
+
+diff -r 70274d53c1dd Doc/library/thread.rst
+--- a/Doc/library/thread.rst
++++ b/Doc/library/thread.rst
+@@ -5,9 +5,9 @@
+ :synopsis: Create multiple threads of control within one interpreter.
+
+ .. note::
+- The :mod:`thread` module has been renamed to :mod:`_thread` in Python 3.0.
++ The :mod:`thread` module has been renamed to :mod:`_thread` in Python 3.
+ The :term:`2to3` tool will automatically adapt imports when converting your
+- sources to 3.0; however, you should consider using the high-level
++ sources to Python 3; however, you should consider using the high-level
+ :mod:`threading` module instead.
+
+
+diff -r 70274d53c1dd Doc/library/threading.rst
+--- a/Doc/library/threading.rst
++++ b/Doc/library/threading.rst
+@@ -401,15 +401,12 @@
+
+ Acquire a lock, blocking or non-blocking.
+
+- When invoked without arguments, block until the lock is unlocked, then set it to
+- locked, and return true.
++ When invoked with the *blocking* argument set to ``True`` (the default),
++ block until the lock is unlocked, then set it to locked and return ``True``.
+
+- When invoked with the *blocking* argument set to true, do the same thing as when
+- called without arguments, and return true.
+-
+- When invoked with the *blocking* argument set to false, do not block. If a call
+- without an argument would block, return false immediately; otherwise, do the
+- same thing as when called without arguments, and return true.
++ When invoked with the *blocking* argument set to ``False``, do not block.
++ If a call with *blocking* set to ``True`` would block, return ``False``
++ immediately; otherwise, set the lock to locked and return ``True``.
+
+
+ .. method:: Lock.release()
+@@ -420,7 +417,7 @@
+ are blocked waiting for the lock to become unlocked, allow exactly one of them
+ to proceed.
+
+- Do not call this method when the lock is unlocked.
++ When invoked on an unlocked lock, a :exc:`ThreadError` is raised.
+
+ There is no return value.
+
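A short sketch of the blocking and non-blocking Lock.acquire() behaviour described above (illustrative only, not part of the patch):

    import threading

    lock = threading.Lock()
    print lock.acquire()        # True -- the lock was free and is now held
    print lock.acquire(False)   # False -- already held, returns immediately
    lock.release()              # unlock; a second release() would raise ThreadError
    print lock.acquire(False)   # True -- free again, acquired without blocking
    lock.release()
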
+diff -r 70274d53c1dd Doc/library/time.rst
+--- a/Doc/library/time.rst
++++ b/Doc/library/time.rst
+@@ -71,9 +71,9 @@
+ the units in which their value or argument is expressed. E.g. on most Unix
+ systems, the clock "ticks" only 50 or 100 times a second.
+
+-* On the other hand, the precision of :func:`time` and :func:`sleep` is better
++* On the other hand, the precision of :func:`.time` and :func:`sleep` is better
+ than their Unix equivalents: times are expressed as floating point numbers,
+- :func:`time` returns the most accurate time available (using Unix
++ :func:`.time` returns the most accurate time available (using Unix
+ :c:func:`gettimeofday` where available), and :func:`sleep` will accept a time
+ with a nonzero fraction (Unix :c:func:`select` is used to implement this, where
+ available).
+@@ -164,7 +164,7 @@
+
+ Convert a time expressed in seconds since the epoch to a string representing
+ local time. If *secs* is not provided or :const:`None`, the current time as
+- returned by :func:`time` is used. ``ctime(secs)`` is equivalent to
++ returned by :func:`.time` is used. ``ctime(secs)`` is equivalent to
+ ``asctime(localtime(secs))``. Locale information is not used by :func:`ctime`.
+
+ .. versionchanged:: 2.1
+@@ -183,7 +183,7 @@
+
+ Convert a time expressed in seconds since the epoch to a :class:`struct_time` in
+ UTC in which the dst flag is always zero. If *secs* is not provided or
+- :const:`None`, the current time as returned by :func:`time` is used. Fractions
++ :const:`None`, the current time as returned by :func:`.time` is used. Fractions
+ of a second are ignored. See above for a description of the
+ :class:`struct_time` object. See :func:`calendar.timegm` for the inverse of this
+ function.
+@@ -198,7 +198,7 @@
+ .. function:: localtime([secs])
+
+ Like :func:`gmtime` but converts to local time. If *secs* is not provided or
+- :const:`None`, the current time as returned by :func:`time` is used. The dst
++ :const:`None`, the current time as returned by :func:`.time` is used. The dst
+ flag is set to ``1`` when DST applies to the given time.
+
+ .. versionchanged:: 2.1
+@@ -213,7 +213,7 @@
+ This is the inverse function of :func:`localtime`. Its argument is the
+ :class:`struct_time` or full 9-tuple (since the dst flag is needed; use ``-1``
+ as the dst flag if it is unknown) which expresses the time in *local* time, not
+- UTC. It returns a floating point number, for compatibility with :func:`time`.
++ UTC. It returns a floating point number, for compatibility with :func:`.time`.
+ If the input value cannot be represented as a valid time, either
+ :exc:`OverflowError` or :exc:`ValueError` will be raised (which depends on
+ whether the invalid value is caught by Python or the underlying C libraries).
+@@ -410,7 +410,7 @@
+ +-------+-------------------+---------------------------------+
+ | 4 | :attr:`tm_min` | range [0, 59] |
+ +-------+-------------------+---------------------------------+
+- | 5 | :attr:`tm_sec` | range [0, 61]; see **(1)** in |
++ | 5 | :attr:`tm_sec` | range [0, 61]; see **(2)** in |
+ | | | :func:`strftime` description |
+ +-------+-------------------+---------------------------------+
+ | 6 | :attr:`tm_wday` | range [0, 6], Monday is 0 |
+@@ -435,8 +435,8 @@
+
+ .. function:: time()
+
+- Return the time as a floating point number expressed in seconds since the epoch,
+- in UTC. Note that even though the time is always returned as a floating point
++ Return the time in seconds since the epoch as a floating point number.
++ Note that even though the time is always returned as a floating point
+ number, not all systems provide time with a better precision than 1 second.
+ While this function normally returns non-decreasing values, it can return a
+ lower value than a previous call if the system clock has been set back between
+diff -r 70274d53c1dd Doc/library/timeit.rst
+--- a/Doc/library/timeit.rst
++++ b/Doc/library/timeit.rst
+@@ -101,10 +101,20 @@
+
+ timeit.Timer('for i in xrange(10): oct(i)', 'gc.enable()').timeit()
+
+-Starting with version 2.6, the module also defines two convenience functions:
++The module also defines three convenience functions:
+
+
+-.. function:: repeat(stmt[, setup[, timer[, repeat=3 [, number=1000000]]]])
++.. function:: default_timer()
++
++ Define a default timer, in a platform specific manner. On Windows,
++ :func:`time.clock` has microsecond granularity but :func:`time.time`'s
++ granularity is 1/60th of a second; on Unix, :func:`time.clock` has 1/100th of
++ a second granularity and :func:`time.time` is much more precise. On either
++ platform, :func:`default_timer` measures wall clock time, not the CPU
++ time. This means that other processes running on the same computer may
++ interfere with the timing.
++
++.. function:: repeat(stmt, setup='pass', timer=default_timer, repeat=3, number=1000000)
+
+ Create a :class:`Timer` instance with the given statement, setup code and timer
+ function and run its :meth:`repeat` method with the given repeat count and
+@@ -113,7 +123,7 @@
+ .. versionadded:: 2.6
+
+
+-.. function:: timeit(stmt[, setup[, timer[, number=1000000]]])
++.. function:: timeit(stmt, setup='pass', timer=default_timer, number=1000000)
+
+ Create a :class:`Timer` instance with the given statement, setup code and timer
+ function and run its :meth:`timeit` method with *number* executions.
+@@ -168,13 +178,9 @@
+ If :option:`-n` is not given, a suitable number of loops is calculated by trying
+ successive powers of 10 until the total time is at least 0.2 seconds.
+
+-The default timer function is platform dependent. On Windows,
+-:func:`time.clock` has microsecond granularity but :func:`time.time`'s
+-granularity is 1/60th of a second; on Unix, :func:`time.clock` has 1/100th of a
+-second granularity and :func:`time.time` is much more precise. On either
+-platform, the default timer functions measure wall clock time, not the CPU time.
+-This means that other processes running on the same computer may interfere with
+-the timing. The best thing to do when accurate timing is necessary is to repeat
++:func:`default_timer` measurements can be affected by other programs running on
++the same machine, so
++the best thing to do when accurate timing is necessary is to repeat
+ the timing a few times and use the best time. The :option:`-r` option is good
+ for this; the default of 3 repetitions is probably enough in most cases. On
+ Unix, you can use :func:`time.clock` to measure CPU time.
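
A minimal sketch of the three convenience functions covered in the timeit hunk above (illustrative only; the timed statement is arbitrary):

    import timeit

    print timeit.default_timer()   # current value of the platform's default timer
    print timeit.timeit('sum(range(100))', number=100000)
    print min(timeit.repeat('sum(range(100))', repeat=3, number=100000))
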
+diff -r 70274d53c1dd Doc/library/tix.rst
+--- a/Doc/library/tix.rst
++++ b/Doc/library/tix.rst
+@@ -24,9 +24,9 @@
+
+ .. note::
+
+- :mod:`Tix` has been renamed to :mod:`tkinter.tix` in Python 3.0. The
++ :mod:`Tix` has been renamed to :mod:`tkinter.tix` in Python 3. The
+ :term:`2to3` tool will automatically adapt imports when converting your
+- sources to 3.0.
++ sources to Python 3.
+
+ .. seealso::
+
+diff -r 70274d53c1dd Doc/library/tkinter.rst
+--- a/Doc/library/tkinter.rst
++++ b/Doc/library/tkinter.rst
+@@ -13,9 +13,9 @@
+
+ .. note::
+
+- :mod:`Tkinter` has been renamed to :mod:`tkinter` in Python 3.0. The
++ :mod:`Tkinter` has been renamed to :mod:`tkinter` in Python 3. The
+ :term:`2to3` tool will automatically adapt imports when converting your
+- sources to 3.0.
++ sources to Python 3.
+
+ .. seealso::
+
+@@ -23,12 +23,27 @@
+ The Python Tkinter Topic Guide provides a great deal of information on using Tk
+ from Python and links to other sources of information on Tk.
+
++ `TKDocs <http://www.tkdocs.com/>`_
++ Extensive tutorial plus friendlier widget pages for some of the widgets.
++
++ `Tkinter reference: a GUI for Python <http://infohost.nmt.edu/tcc/help/pubs/tkinter/>`_
++ On-line reference material.
++
++ `Tkinter docs from effbot <http://effbot.org/tkinterbook/>`_
++ Online reference for tkinter supported by effbot.org.
++
++ `Tcl/Tk manual <http://www.tcl.tk/man/tcl8.5/>`_
++ Official manual for the latest tcl/tk version.
++
++ `Programming Python <http://www.amazon.com/Programming-Python-Mark-Lutz/dp/0596158106/>`_
++ Book by Mark Lutz, has excellent coverage of Tkinter.
++
++ `Modern Tkinter for Busy Python Developers <http://www.amazon.com/Modern-Tkinter-Python-Developers-ebook/dp/B0071QDNLO/>`_
++ Book by Mark Roseman about building attractive and modern graphical user interfaces with Python and Tkinter.
++
+ `An Introduction to Tkinter <http://www.pythonware.com/library/an-introduction-to-tkinter.htm>`_
+ Fredrik Lundh's on-line reference material.
+
+- `Tkinter reference: a GUI for Python <http://infohost.nmt.edu/tcc/help/pubs/lang.html>`_
+- On-line reference material.
+-
+ `Python and Tkinter Programming <http://www.amazon.com/exec/obidos/ASIN/1884777813>`_
+ The book by John Grayson (ISBN 1-884777-81-3).
+
+@@ -109,7 +124,7 @@
+ :mod:`turtle`
+ Turtle graphics in a Tk window.
+
+-These have been renamed as well in Python 3.0; they were all made submodules of
++These have been renamed as well in Python 3; they were all made submodules of
+ the new ``tkinter`` package.
+
+
+diff -r 70274d53c1dd Doc/library/tokenize.rst
+--- a/Doc/library/tokenize.rst
++++ b/Doc/library/tokenize.rst
+@@ -29,7 +29,8 @@
+ which must be a callable object which provides the same interface as the
+ :meth:`readline` method of built-in file objects (see section
+ :ref:`bltin-file-objects`). Each call to the function should return one line
+- of input as a string.
++ of input as a string. Alternately, *readline* may be a callable object that
++ signals completion by raising :exc:`StopIteration`.
+
+ The generator produces 5-tuples with these members: the token type; the token
+ string; a 2-tuple ``(srow, scol)`` of ints specifying the row and column
+diff -r 70274d53c1dd Doc/library/trace.rst
+--- a/Doc/library/trace.rst
++++ b/Doc/library/trace.rst
+@@ -149,7 +149,7 @@
+ the current tracing parameters. *cmd* must be a string or code object,
+ suitable for passing into :func:`exec`.
+
+- .. method:: runctx(cmd[, globals=None[, locals=None]])
++ .. method:: runctx(cmd, globals=None, locals=None)
+
+ Execute the command and gather statistics from the execution with the
+ current tracing parameters, in the defined global and local
+diff -r 70274d53c1dd Doc/library/ttk.rst
+--- a/Doc/library/ttk.rst
++++ b/Doc/library/ttk.rst
+@@ -265,7 +265,7 @@
+ *x* and *y* are pixel coordinates relative to the widget.
+
+
+- .. method:: instate(statespec[, callback=None[, *args[, **kw]]])
++ .. method:: instate(statespec, callback=None, *args, **kw)
+
+ Test the widget's state. If a callback is not specified, returns True
+ if the widget state matches *statespec* and False otherwise. If callback
+@@ -523,7 +523,7 @@
+ omitted, returns the widget name of the currently selected pane.
+
+
+- .. method:: tab(tab_id[, option=None[, **kw]])
++ .. method:: tab(tab_id, option=None, **kw)
+
+ Query or modify the options of the specific *tab_id*.
+
+@@ -846,7 +846,7 @@
+
+ .. class:: Treeview
+
+- .. method:: bbox(item[, column=None])
++ .. method:: bbox(item, column=None)
+
+ Returns the bounding box (relative to the treeview widget's window) of
+ the specified *item* in the form (x, y, width, height).
+@@ -873,7 +873,7 @@
+ *item*'s children.
+
+
+- .. method:: column(column[, option=None[, **kw]])
++ .. method:: column(column, option=None, **kw)
+
+ Query or modify the options for the specified *column*.
+
+@@ -928,7 +928,7 @@
+ the current focus item, or '' if there is none.
+
+
+- .. method:: heading(column[, option=None[, **kw]])
++ .. method:: heading(column, option=None, **kw)
+
+ Query or modify the heading options for the specified *column*.
+
+@@ -1001,7 +1001,7 @@
+ Returns the integer index of *item* within its parent's list of children.
+
+
+- .. method:: insert(parent, index[, iid=None[, **kw]])
++ .. method:: insert(parent, index, iid=None, **kw)
+
+ Creates a new item and returns the item identifier of the newly created
+ item.
+@@ -1096,7 +1096,7 @@
+ Toggle the selection state of each item in *items*.
+
+
+- .. method:: set(item[, column=None[, value=None]])
++ .. method:: set(item, column=None, value=None)
+
+ With one argument, returns a dictionary of column/value pairs for the
+ specified *item*. With two arguments, returns the current value of the
+@@ -1104,14 +1104,14 @@
+ *column* in given *item* to the specified *value*.
+
+
+- .. method:: tag_bind(tagname[, sequence=None[, callback=None]])
++ .. method:: tag_bind(tagname, sequence=None, callback=None)
+
+ Bind a callback for the given event *sequence* to the tag *tagname*.
+ When an event is delivered to an item, the callbacks for each of the
+ item's tags option are called.
+
+
+- .. method:: tag_configure(tagname[, option=None[, **kw]])
++ .. method:: tag_configure(tagname, option=None, **kw)
+
+ Query or modify the options for the specified *tagname*.
+
+@@ -1220,7 +1220,7 @@
+ foreground option, for example, you would get a blue foreground
+ when the widget is in the active or pressed states.
+
+- .. method:: lookup(style, option[, state=None[, default=None]])
++ .. method:: lookup(style, option, state=None, default=None)
+
+ Returns the value specified for *option* in *style*.
+
+@@ -1235,7 +1235,7 @@
+ print ttk.Style().lookup("TButton", "font")
+
+
+- .. method:: layout(style[, layoutspec=None])
++ .. method:: layout(style, layoutspec=None)
+
+ Define the widget layout for given *style*. If *layoutspec* is omitted,
+ return the layout specification for given style.
+@@ -1318,7 +1318,7 @@
+ Returns the list of *elementname*'s options.
+
+
+- .. method:: theme_create(themename[, parent=None[, settings=None]])
++ .. method:: theme_create(themename, parent=None, settings=None)
+
+ Create a new theme.
+
+diff -r 70274d53c1dd Doc/library/unittest.rst
+--- a/Doc/library/unittest.rst
++++ b/Doc/library/unittest.rst
+@@ -616,7 +616,7 @@
+
+ Classes can be skipped just like methods: ::
+
+- @skip("showing class skipping")
++ @unittest.skip("showing class skipping")
+ class MySkippedTestCase(unittest.TestCase):
+ def test_not_run(self):
+ pass
+@@ -1426,8 +1426,8 @@
+ The :class:`TestLoader` class is used to create test suites from classes and
+ modules. Normally, there is no need to create an instance of this class; the
+ :mod:`unittest` module provides an instance that can be shared as
+- ``unittest.defaultTestLoader``. Using a subclass or instance, however, allows
+- customization of some configurable properties.
++ :data:`unittest.defaultTestLoader`. Using a subclass or instance, however,
++ allows customization of some configurable properties.
+
+ :class:`TestLoader` objects have the following methods:
+
+@@ -1784,11 +1784,12 @@
+ stream, descriptions, verbosity
+
+
+-.. function:: main([module[, defaultTest[, argv[, testRunner[, testLoader[, exit[, verbosity[, failfast[, catchbreak[,buffer]]]]]]]]]])
+-
+- A command-line program that runs a set of tests; this is primarily for making
+- test modules conveniently executable. The simplest use for this function is to
+- include the following line at the end of a test script::
++.. function:: main([module[, defaultTest[, argv[, testRunner[, testLoader[, exit[, verbosity[, failfast[, catchbreak[, buffer]]]]]]]]]])
++
++ A command-line program that loads a set of tests from *module* and runs them;
++ this is primarily for making test modules conveniently executable.
++ The simplest use for this function is to include the following line at the
++ end of a test script::
+
+ if __name__ == '__main__':
+ unittest.main()
+@@ -1799,10 +1800,17 @@
+ if __name__ == '__main__':
+ unittest.main(verbosity=2)
+
++ The *argv* argument can be a list of options passed to the program, with the
++ first element being the program name. If not specified or ``None``,
++ the values of :data:`sys.argv` are used.
++
+ The *testRunner* argument can either be a test runner class or an already
+ created instance of it. By default ``main`` calls :func:`sys.exit` with
+ an exit code indicating success or failure of the tests run.
+
++ The *testLoader* argument has to be a :class:`TestLoader` instance,
++ and defaults to :data:`defaultTestLoader`.
++
+ ``main`` supports being used from the interactive interpreter by passing in the
+ argument ``exit=False``. This displays the result on standard output without
+ calling :func:`sys.exit`::
+@@ -1810,14 +1818,14 @@
+ >>> from unittest import main
+ >>> main(module='test_module', exit=False)
+
+- The ``failfast``, ``catchbreak`` and ``buffer`` parameters have the same
++ The *failfast*, *catchbreak* and *buffer* parameters have the same
+ effect as the same-name `command-line options`_.
+
+ Calling ``main`` actually returns an instance of the ``TestProgram`` class.
+ This stores the result of the tests run as the ``result`` attribute.
+
+ .. versionchanged:: 2.7
+- The ``exit``, ``verbosity``, ``failfast``, ``catchbreak`` and ``buffer``
++ The *exit*, *verbosity*, *failfast*, *catchbreak* and *buffer*
+ parameters were added.
+
+
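A minimal sketch of unittest.main() with the keyword arguments described above (illustrative only, not part of the patch; the test case is invented):

    import unittest

    class DemoTest(unittest.TestCase):
        def test_upper(self):
            self.assertEqual('foo'.upper(), 'FOO')

    if __name__ == '__main__':
        # verbose run that does not call sys.exit(), so it also works
        # from an interactive interpreter
        unittest.main(verbosity=2, exit=False)
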
+diff -r 70274d53c1dd Doc/library/urllib.rst
+--- a/Doc/library/urllib.rst
++++ b/Doc/library/urllib.rst
+@@ -6,11 +6,11 @@
+
+ .. note::
+ The :mod:`urllib` module has been split into parts and renamed in
+- Python 3.0 to :mod:`urllib.request`, :mod:`urllib.parse`,
++ Python 3 to :mod:`urllib.request`, :mod:`urllib.parse`,
+ and :mod:`urllib.error`. The :term:`2to3` tool will automatically adapt
+- imports when converting your sources to 3.0.
++ imports when converting your sources to Python 3.
+ Also note that the :func:`urllib.urlopen` function has been removed in
+- Python 3.0 in favor of :func:`urllib2.urlopen`.
++ Python 3 in favor of :func:`urllib2.urlopen`.
+
+ .. index::
+ single: WWW
+@@ -131,7 +131,7 @@
+ :envvar:`no_proxy` environment variable.
+
+ .. deprecated:: 2.6
+- The :func:`urlopen` function has been removed in Python 3.0 in favor
++ The :func:`urlopen` function has been removed in Python 3 in favor
+ of :func:`urllib2.urlopen`.
+
+
+diff -r 70274d53c1dd Doc/library/urllib2.rst
+--- a/Doc/library/urllib2.rst
++++ b/Doc/library/urllib2.rst
+@@ -9,9 +9,9 @@
+
+ .. note::
+ The :mod:`urllib2` module has been split across several modules in
+- Python 3.0 named :mod:`urllib.request` and :mod:`urllib.error`.
++ Python 3 named :mod:`urllib.request` and :mod:`urllib.error`.
+ The :term:`2to3` tool will automatically adapt imports when converting
+- your sources to 3.0.
++ your sources to Python 3.
+
+
+ The :mod:`urllib2` module defines functions and classes which help in opening
+@@ -380,6 +380,17 @@
+ Return the selector --- the part of the URL that is sent to the server.
+
+
++.. method:: Request.get_header(header_name, default=None)
++
++ Return the value of the given header. If the header is not present, return
++ the default value.
++
++
++.. method:: Request.header_items()
++
++ Return a list of tuples (header_name, header_value) of the Request headers.
++
++
+ .. method:: Request.set_proxy(host, type)
+
+ Prepare the request by connecting to a proxy server. The *host* and *type* will
+diff -r 70274d53c1dd Doc/library/urlparse.rst
+--- a/Doc/library/urlparse.rst
++++ b/Doc/library/urlparse.rst
+@@ -13,9 +13,9 @@
+ pair: relative; URL
+
+ .. note::
+- The :mod:`urlparse` module is renamed to :mod:`urllib.parse` in Python 3.0.
++ The :mod:`urlparse` module is renamed to :mod:`urllib.parse` in Python 3.
+ The :term:`2to3` tool will automatically adapt imports when converting
+- your sources to 3.0.
++ your sources to Python 3.
+
+ **Source code:** :source:`Lib/urlparse.py`
+
+@@ -27,11 +27,11 @@
+ to an absolute URL given a "base URL."
+
+ The module has been designed to match the Internet RFC on Relative Uniform
+-Resource Locators (and discovered a bug in an earlier draft!). It supports the
+-following URL schemes: ``file``, ``ftp``, ``gopher``, ``hdl``, ``http``,
+-``https``, ``imap``, ``mailto``, ``mms``, ``news``, ``nntp``, ``prospero``,
+-``rsync``, ``rtsp``, ``rtspu``, ``sftp``, ``shttp``, ``sip``, ``sips``,
+-``snews``, ``svn``, ``svn+ssh``, ``telnet``, ``wais``.
++Resource Locators. It supports the following URL schemes: ``file``, ``ftp``,
++``gopher``, ``hdl``, ``http``, ``https``, ``imap``, ``mailto``, ``mms``,
++``news``, ``nntp``, ``prospero``, ``rsync``, ``rtsp``, ``rtspu``, ``sftp``,
++``shttp``, ``sip``, ``sips``, ``snews``, ``svn``, ``svn+ssh``, ``telnet``,
++``wais``.
+
+ .. versionadded:: 2.5
+ Support for the ``sftp`` and ``sips`` schemes.
+diff -r 70274d53c1dd Doc/library/user.rst
+--- a/Doc/library/user.rst
++++ b/Doc/library/user.rst
+@@ -7,7 +7,7 @@
+ :deprecated:
+
+ .. deprecated:: 2.6
+- The :mod:`user` module has been removed in Python 3.0.
++ The :mod:`user` module has been removed in Python 3.
+
+ .. index::
+ pair: .pythonrc.py; file
+diff -r 70274d53c1dd Doc/library/userdict.rst
+--- a/Doc/library/userdict.rst
++++ b/Doc/library/userdict.rst
+@@ -114,8 +114,8 @@
+
+ .. note::
+ The :class:`UserList` class has been moved to the :mod:`collections`
+- module in Python 3.0. The :term:`2to3` tool will automatically adapt
+- imports when converting your sources to 3.0.
++ module in Python 3. The :term:`2to3` tool will automatically adapt
++ imports when converting your sources to Python 3.
+
+
+ In addition to supporting the methods and operations of mutable sequences (see
+@@ -187,8 +187,8 @@
+
+ .. note::
+ The :class:`UserString` class has been moved to the :mod:`collections`
+- module in Python 3.0. The :term:`2to3` tool will automatically adapt
+- imports when converting your sources to 3.0.
++ module in Python 3. The :term:`2to3` tool will automatically adapt
++ imports when converting your sources to Python 3.
+
+
+
+@@ -203,7 +203,7 @@
+ hard to track down.
+
+ .. deprecated:: 2.6
+- The :class:`MutableString` class has been removed in Python 3.0.
++ The :class:`MutableString` class has been removed in Python 3.
+
+ In addition to supporting the methods and operations of string and Unicode
+ objects (see section :ref:`string-methods`), :class:`UserString` instances
+diff -r 70274d53c1dd Doc/library/warnings.rst
+--- a/Doc/library/warnings.rst
++++ b/Doc/library/warnings.rst
+@@ -167,7 +167,8 @@
+ the command-line options passed to :option:`-W` and calls to
+ :func:`filterwarnings`.
+
+-* :exc:`PendingDeprecationWarning`, and :exc:`ImportWarning` are ignored.
++* :exc:`DeprecationWarning` and :exc:`PendingDeprecationWarning`, and
++ :exc:`ImportWarning` are ignored.
+
+ * :exc:`BytesWarning` is ignored unless the :option:`-b` option is given once or
+ twice; in this case this warning is either printed (``-b``) or turned into an
+@@ -418,7 +419,7 @@
+
+ .. note::
+
+- In Python 3.0, the arguments to the constructor for
++ In Python 3, the arguments to the constructor for
+ :class:`catch_warnings` are keyword-only arguments.
+
+ .. versionadded:: 2.6
+diff -r 70274d53c1dd Doc/library/webbrowser.rst
+--- a/Doc/library/webbrowser.rst
++++ b/Doc/library/webbrowser.rst
+@@ -36,7 +36,9 @@
+ module. It accepts an URL as the argument. It accepts the following optional
+ parameters: ``-n`` opens the URL in a new browser window, if possible;
+ ``-t`` opens the URL in a new browser page ("tab"). The options are,
+-naturally, mutually exclusive.
++naturally, mutually exclusive. Usage example::
++
++ python -m webbrowser -t "http://www.python.org"
+
+ The following exception is defined:
+
+@@ -48,7 +50,7 @@
+ The following functions are defined:
+
+
+-.. function:: open(url[, new=0[, autoraise=True]])
++.. function:: open(url, new=0, autoraise=True)
+
+ Display *url* using the default browser. If *new* is 0, the *url* is opened
+ in the same browser window if possible. If *new* is 1, a new browser window
+@@ -138,9 +140,9 @@
+ +-----------------------+-----------------------------------------+-------+
+ | ``'windows-default'`` | :class:`WindowsDefault` | \(2) |
+ +-----------------------+-----------------------------------------+-------+
+-| ``'internet-config'`` | :class:`InternetConfig` | \(3) |
++| ``'macosx'`` | :class:`MacOSX('default')` | \(3) |
+ +-----------------------+-----------------------------------------+-------+
+-| ``'macosx'`` | :class:`MacOSX('default')` | \(4) |
++| ``'safari'`` | :class:`MacOSX('safari')` | \(3) |
+ +-----------------------+-----------------------------------------+-------+
+
+ Notes:
+@@ -156,9 +158,6 @@
+ Only on Windows platforms.
+
+ (3)
+- Only on Mac OS platforms; requires the standard MacPython :mod:`ic` module.
+-
+-(4)
+ Only on Mac OS X platform.
+
+ Here are some simple examples::
+@@ -181,7 +180,7 @@
+ module-level convenience functions:
+
+
+-.. method:: controller.open(url[, new=0[, autoraise=True]])
++.. method:: controller.open(url, new=0, autoraise=True)
+
+ Display *url* using the browser handled by this controller. If *new* is 1, a new
+ browser window is opened if possible. If *new* is 2, a new browser page ("tab")
+diff -r 70274d53c1dd Doc/library/whichdb.rst
+--- a/Doc/library/whichdb.rst
++++ b/Doc/library/whichdb.rst
+@@ -6,8 +6,8 @@
+
+ .. note::
+ The :mod:`whichdb` module's only function has been put into the :mod:`dbm`
+- module in Python 3.0. The :term:`2to3` tool will automatically adapt imports
+- when converting your sources to 3.0.
++ module in Python 3. The :term:`2to3` tool will automatically adapt imports
++ when converting your sources to Python 3.
+
+
+ The single function in this module attempts to guess which of the several simple
+diff -r 70274d53c1dd Doc/library/wsgiref.rst
+--- a/Doc/library/wsgiref.rst
++++ b/Doc/library/wsgiref.rst
+@@ -59,7 +59,7 @@
+ found, and "http" otherwise.
+
+
+-.. function:: request_uri(environ [, include_query=1])
++.. function:: request_uri(environ, include_query=1)
+
+ Return the full request URI, optionally including the query string, using the
+ algorithm found in the "URL Reconstruction" section of :pep:`333`. If
+@@ -148,7 +148,7 @@
+ :rfc:`2616`.
+
+
+-.. class:: FileWrapper(filelike [, blksize=8192])
++.. class:: FileWrapper(filelike, blksize=8192)
+
+ A wrapper to convert a file-like object to an :term:`iterator`. The resulting objects
+ support both :meth:`__getitem__` and :meth:`__iter__` iteration styles, for
+@@ -271,7 +271,7 @@
+ :mod:`wsgiref.util`.)
+
+
+-.. function:: make_server(host, port, app [, server_class=WSGIServer [, handler_class=WSGIRequestHandler]])
++.. function:: make_server(host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler)
+
+ Create a new WSGI server listening on *host* and *port*, accepting connections
+ for *app*. The return value is an instance of the supplied *server_class*, and
+@@ -460,7 +460,7 @@
+ environment.
+
+
+-.. class:: BaseCGIHandler(stdin, stdout, stderr, environ [, multithread=True [, multiprocess=False]])
++.. class:: BaseCGIHandler(stdin, stdout, stderr, environ, multithread=True, multiprocess=False)
+
+ Similar to :class:`CGIHandler`, but instead of using the :mod:`sys` and
+ :mod:`os` modules, the CGI environment and I/O streams are specified explicitly.
+@@ -475,7 +475,7 @@
+ instead of :class:`SimpleHandler`.
+
+
+-.. class:: SimpleHandler(stdin, stdout, stderr, environ [,multithread=True [, multiprocess=False]])
++.. class:: SimpleHandler(stdin, stdout, stderr, environ, multithread=True, multiprocess=False)
+
+ Similar to :class:`BaseCGIHandler`, but designed for use with HTTP origin
+ servers. If you are writing an HTTP server implementation, you will probably
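
A minimal sketch of make_server() from wsgiref.simple_server as described above (illustrative only, not part of the patch; host, port and response body are arbitrary):

    from wsgiref.simple_server import make_server

    def app(environ, start_response):
        # the simplest possible WSGI application
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello from wsgiref\n']

    httpd = make_server('localhost', 8000, app)
    httpd.handle_request()      # serve a single request, then return
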
+diff -r 70274d53c1dd Doc/library/xml.dom.minidom.rst
+--- a/Doc/library/xml.dom.minidom.rst
++++ b/Doc/library/xml.dom.minidom.rst
+@@ -18,6 +18,14 @@
+ Model interface. It is intended to be simpler than the full DOM and also
+ significantly smaller.
+
++.. note::
++
++ The :mod:`xml.dom.minidom` module provides an implementation of the W3C-DOM,
++ with an API similar to that in other programming languages. Users who are
++ unfamiliar with the W3C-DOM interface or who would like to write less code
++ for processing XML files should consider using the
++ :mod:`xml.etree.ElementTree` module instead.
++
+ DOM applications typically start by parsing some XML into a DOM. With
+ :mod:`xml.dom.minidom`, this is done through the parse functions::
+
+@@ -121,7 +129,7 @@
+ to discard children of that node.
+
+
+-.. method:: Node.writexml(writer[, indent=""[, addindent=""[, newl=""]]])
++.. method:: Node.writexml(writer, indent="", addindent="", newl="")
+
+ Write XML to the writer object. The writer should have a :meth:`write` method
+ which matches that of the file object interface. The *indent* parameter is the
+diff -r 70274d53c1dd Doc/library/xmlrpclib.rst
+--- a/Doc/library/xmlrpclib.rst
++++ b/Doc/library/xmlrpclib.rst
+@@ -8,8 +8,8 @@
+
+ .. note::
+ The :mod:`xmlrpclib` module has been renamed to :mod:`xmlrpc.client` in
+- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
+- converting your sources to 3.0.
++ Python 3. The :term:`2to3` tool will automatically adapt imports when
++ converting your sources to Python 3.
+
+
+ .. XXX Not everything is documented yet. It might be good to describe
+diff -r 70274d53c1dd Doc/library/zipfile.rst
+--- a/Doc/library/zipfile.rst
++++ b/Doc/library/zipfile.rst
+@@ -25,9 +25,6 @@
+ create an encrypted file. Decryption is extremely slow as it is
+ implemented in native Python rather than C.
+
+-For other archive formats, see the :mod:`bz2`, :mod:`gzip`, and
+-:mod:`tarfile` modules.
+-
+ The module defines the following items:
+
+ .. exception:: BadZipfile
+diff -r 70274d53c1dd Doc/library/zlib.rst
+--- a/Doc/library/zlib.rst
++++ b/Doc/library/zlib.rst
+@@ -19,9 +19,7 @@
+ consult the zlib manual at http://www.zlib.net/manual.html for authoritative
+ information.
+
+-For reading and writing ``.gz`` files see the :mod:`gzip` module. For
+-other archive formats, see the :mod:`bz2`, :mod:`zipfile`, and
+-:mod:`tarfile` modules.
++For reading and writing ``.gz`` files see the :mod:`gzip` module.
+
+ The available exception and functions in this module are:
+
+diff -r 70274d53c1dd Doc/reference/compound_stmts.rst
+--- a/Doc/reference/compound_stmts.rst
++++ b/Doc/reference/compound_stmts.rst
+@@ -290,15 +290,28 @@
+
+ .. index:: keyword: finally
+
+-If :keyword:`finally` is present, it specifies a 'cleanup' handler. The
+-:keyword:`try` clause is executed, including any :keyword:`except` and
+-:keyword:`else` clauses. If an exception occurs in any of the clauses and is
+-not handled, the exception is temporarily saved. The :keyword:`finally` clause
+-is executed. If there is a saved exception, it is re-raised at the end of the
+-:keyword:`finally` clause. If the :keyword:`finally` clause raises another
+-exception or executes a :keyword:`return` or :keyword:`break` statement, the
+-saved exception is lost. The exception information is not available to the
+-program during execution of the :keyword:`finally` clause.
++If :keyword:`finally` is present, it specifies a 'cleanup' handler.
++The :keyword:`try` clause is executed, including any :keyword:`except`
++and :keyword:`else` clauses. If an exception occurs in any of the
++clauses and is not handled, the exception is temporarily saved. The
++:keyword:`finally` clause is executed. If there is a saved exception
++or :keyword:`break` statement, it is re-raised at the end of the
++:keyword:`finally` clause. If the :keyword:`finally` clause raises
++another exception the saved exception is set as the context of the new
++exception; if the :keyword:`finally` clause executes a
++:keyword:`return` statement, the saved exception is discarded::
++
++ def f():
++ try:
++ 1/0
++ finally:
++ return 42
++
++ >>> f()
++ 42
++
++The exception information is not available to the program during execution of
++the :keyword:`finally` clause.
+
+ .. index::
+ statement: return
+diff -r 70274d53c1dd Doc/reference/datamodel.rst
+--- a/Doc/reference/datamodel.rst
++++ b/Doc/reference/datamodel.rst
+@@ -573,7 +573,7 @@
+ :attr:`im_self` used to refer to the class that defined the method.
+
+ .. versionchanged:: 2.6
+- For 3.0 forward-compatibility, :attr:`im_func` is also available as
++ For Python 3 forward-compatibility, :attr:`im_func` is also available as
+ :attr:`__func__`, and :attr:`im_self` as :attr:`__self__`.
+
+ .. index::
+@@ -1149,7 +1149,7 @@
+ single: class; classic
+ single: class; old-style
+
+-Old-style classes are removed in Python 3.0, leaving only the semantics of
++Old-style classes are removed in Python 3, leaving only the semantics of
+ new-style classes.
+
+
+@@ -2235,7 +2235,7 @@
+ This section used to document the rules for coercion. As the language has
+ evolved, the coercion rules have become hard to document precisely; documenting
+ what one version of one particular implementation does is undesirable. Instead,
+-here are some informal guidelines regarding coercion. In Python 3.0, coercion
++here are some informal guidelines regarding coercion. In Python 3, coercion
+ will not be supported.
+
+ *
+diff -r 70274d53c1dd Doc/reference/expressions.rst
+--- a/Doc/reference/expressions.rst
++++ b/Doc/reference/expressions.rst
+@@ -1392,7 +1392,7 @@
+
+ .. [#] In Python 2.3 and later releases, a list comprehension "leaks" the control
+ variables of each ``for`` it contains into the containing scope. However, this
+- behavior is deprecated, and relying on it will not work in Python 3.0
++ behavior is deprecated, and relying on it will not work in Python 3.
+
+ .. [#] While ``abs(x%y) < abs(y)`` is true mathematically, for floats it may not be
+ true numerically due to roundoff. For example, and assuming a platform on which
+diff -r 70274d53c1dd Doc/reference/simple_stmts.rst
+--- a/Doc/reference/simple_stmts.rst
++++ b/Doc/reference/simple_stmts.rst
+@@ -993,6 +993,9 @@
+ it should be a dictionary, which will be used for both the global and the local
+ variables. If two expressions are given, they are used for the global and local
+ variables, respectively. If provided, *locals* can be any mapping object.
++Remember that at module level, globals and locals are the same dictionary. If
++two separate objects are given as *globals* and *locals*, the code will be
++executed as if it were embedded in a class definition.
+
+ .. versionchanged:: 2.4
+ Formerly, *locals* was required to be a dictionary.
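
A small sketch of the globals/locals behaviour noted above for the exec statement (illustrative only, not part of the patch):

    g = {'x': 10}
    l = {}

    # with two separate mappings, name lookups fall back to g and
    # assignments land in l, much like code embedded in a class definition
    exec "y = x + 1" in g, l

    print l['y']       # 11
    print 'y' in g     # False
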
+diff -r 70274d53c1dd Doc/tools/sphinxext/download.html
+--- a/Doc/tools/sphinxext/download.html
++++ b/Doc/tools/sphinxext/download.html
+@@ -35,8 +35,12 @@
+ </tr>
+ </table>
+
++<p>These archives contain all the content in the documentation.</p>
+
+-<p>These archives contain all the content in the documentation.</p>
++<p>HTML Help (<tt>.chm</tt>) files are made available in the "Windows" section
++on the <a href="http://python.org/download/releases/{{ release[:5] }}/">Python
++download page</a>.</p>
++
+
+ <h2>Unpacking</h2>
+
+diff -r 70274d53c1dd Doc/tools/sphinxext/indexsidebar.html
+--- a/Doc/tools/sphinxext/indexsidebar.html
++++ b/Doc/tools/sphinxext/indexsidebar.html
+@@ -11,13 +11,8 @@
+ <h3>Other resources</h3>
+ <ul>
+ {# XXX: many of these should probably be merged in the main docs #}
+- <li><a href="http://www.python.org/doc/faq/">FAQs</a></li>
+- <li><a href="http://www.python.org/doc/essays/">Guido's Essays</a></li>
+- <li><a href="http://www.python.org/doc/newstyle/">New-style Classes</a></li>
+ <li><a href="http://www.python.org/dev/peps/">PEP Index</a></li>
+ <li><a href="http://wiki.python.org/moin/BeginnersGuide">Beginner's Guide</a></li>
+ <li><a href="http://wiki.python.org/moin/PythonBooks">Book List</a></li>
+ <li><a href="http://www.python.org/doc/av/">Audio/Visual Talks</a></li>
+- <li><a href="http://www.python.org/doc/other/">Other Doc Collections</a></li>
+- <li><a href="{{ pathto('bugs') }}">Report a Bug</a></li>
+ </ul>
+diff -r 70274d53c1dd Doc/tutorial/classes.rst
+--- a/Doc/tutorial/classes.rst
++++ b/Doc/tutorial/classes.rst
+@@ -534,8 +534,8 @@
+
+ .. _tut-private:
+
+-Private Variables
+-=================
++Private Variables and Class-local References
++============================================
+
+ "Private" instance variables that cannot be accessed except from inside an
+ object don't exist in Python. However, there is a convention that is followed
+diff -r 70274d53c1dd Doc/tutorial/controlflow.rst
+--- a/Doc/tutorial/controlflow.rst
++++ b/Doc/tutorial/controlflow.rst
+@@ -129,9 +129,6 @@
+ The :keyword:`break` statement, like in C, breaks out of the smallest enclosing
+ :keyword:`for` or :keyword:`while` loop.
+
+-The :keyword:`continue` statement, also borrowed from C, continues with the next
+-iteration of the loop.
+-
+ Loop statements may have an ``else`` clause; it is executed when the loop
+ terminates through exhaustion of the list (with :keyword:`for`) or when the
+ condition becomes false (with :keyword:`while`), but not when the loop is
+@@ -159,6 +156,29 @@
+ (Yes, this is the correct code. Look closely: the ``else`` clause belongs to
+ the :keyword:`for` loop, **not** the :keyword:`if` statement.)
+
++When used with a loop, the ``else`` clause has more in common with the
++``else`` clause of a :keyword:`try` statement than it does with that of
++:keyword:`if` statements: a :keyword:`try` statement's ``else`` clause runs
++when no exception occurs, and a loop's ``else`` clause runs when no ``break``
++occurs. For more on the :keyword:`try` statement and exceptions, see
++:ref:`tut-handling`.
++
++The :keyword:`continue` statement, also borrowed from C, continues with the next
++iteration of the loop::
++
++ >>> for num in range(2, 10):
++ ... if num % 2 == 0:
++ ... print "Found an even number", num
++ ... continue
++ ... print "Found a number", num
++ Found an even number 2
++ Found a number 3
++ Found an even number 4
++ Found a number 5
++ Found an even number 6
++ Found a number 7
++ Found an even number 8
++ Found a number 9
+
+ .. _tut-pass:
+
+diff -r 70274d53c1dd Doc/tutorial/datastructures.rst
+--- a/Doc/tutorial/datastructures.rst
++++ b/Doc/tutorial/datastructures.rst
+@@ -423,17 +423,31 @@
+ ... u = t, (1, 2, 3, 4, 5)
+ >>> u
+ ((12345, 54321, 'hello!'), (1, 2, 3, 4, 5))
++ >>> # Tuples are immutable:
++ ... t[0] = 88888
++ Traceback (most recent call last):
++ File "<stdin>", line 1, in <module>
++ TypeError: 'tuple' object does not support item assignment
++ >>> # but they can contain mutable objects:
++ ... v = ([1, 2, 3], [3, 2, 1])
++ >>> v
++ ([1, 2, 3], [3, 2, 1])
++
+
+ As you see, on output tuples are always enclosed in parentheses, so that nested
+ tuples are interpreted correctly; they may be input with or without surrounding
+ parentheses, although often parentheses are necessary anyway (if the tuple is
+-part of a larger expression).
++part of a larger expression). It is not possible to assign to the individual
++items of a tuple, however it is possible to create tuples which contain mutable
++objects, such as lists.
+
+-Tuples have many uses. For example: (x, y) coordinate pairs, employee records
+-from a database, etc. Tuples, like strings, are immutable: it is not possible
+-to assign to the individual items of a tuple (you can simulate much of the same
+-effect with slicing and concatenation, though). It is also possible to create
+-tuples which contain mutable objects, such as lists.
++Though tuples may seem similar to lists, they are often used in different
++situations and for different purposes.
++Tuples are :term:`immutable`, and usually contain a heterogeneous sequence of
++elements that are accessed via unpacking (see later in this section) or indexing
++(or even by attribute in the case of :func:`namedtuples <collections.namedtuple>`).
++Lists are :term:`mutable`, and their elements are usually homogeneous and are
++accessed by iterating over the list.
+
+ A special problem is the construction of tuples containing 0 or 1 items: the
+ syntax has some extra quirks to accommodate these. Empty tuples are constructed
+@@ -462,8 +476,6 @@
+ sequence. Note that multiple assignment is really just a combination of tuple
+ packing and sequence unpacking.
+
+-.. XXX Add a bit on the difference between tuples and lists.
+-
+
+ .. _tut-sets:
+
+@@ -577,16 +589,6 @@
+ Looping Techniques
+ ==================
+
+-When looping through dictionaries, the key and corresponding value can be
+-retrieved at the same time using the :meth:`iteritems` method. ::
+-
+- >>> knights = {'gallahad': 'the pure', 'robin': 'the brave'}
+- >>> for k, v in knights.iteritems():
+- ... print k, v
+- ...
+- gallahad the pure
+- robin the brave
+-
+ When looping through a sequence, the position index and corresponding value can
+ be retrieved at the same time using the :func:`enumerate` function. ::
+
+@@ -633,6 +635,16 @@
+ orange
+ pear
+
++When looping through dictionaries, the key and corresponding value can be
++retrieved at the same time using the :meth:`iteritems` method. ::
++
++ >>> knights = {'gallahad': 'the pure', 'robin': 'the brave'}
++ >>> for k, v in knights.iteritems():
++ ... print k, v
++ ...
++ gallahad the pure
++ robin the brave
++
+
+ .. _tut-conditions:
+
+diff -r 70274d53c1dd Doc/tutorial/errors.rst
+--- a/Doc/tutorial/errors.rst
++++ b/Doc/tutorial/errors.rst
+@@ -120,6 +120,14 @@
+ ... except (RuntimeError, TypeError, NameError):
+ ... pass
+
++Note that the parentheses around this tuple are required, because
++``except ValueError, e:`` was the syntax used for what is normally
++written as ``except ValueError as e:`` in modern Python (described
++below). The old syntax is still supported for backwards compatibility.
++This means ``except RuntimeError, TypeError`` is not equivalent to
++``except (RuntimeError, TypeError):`` but to ``except RuntimeError as
++TypeError:`` which is not what you want.
++
+ The last except clause may omit the exception name(s), to serve as a wildcard.
+ Use this with extreme caution, since it is easy to mask a real programming error
+ in this way! It can also be used to print an error message and then re-raise
+@@ -131,8 +139,8 @@
+ f = open('myfile.txt')
+ s = f.readline()
+ i = int(s.strip())
+- except IOError as (errno, strerror):
+- print "I/O error({0}): {1}".format(errno, strerror)
++ except IOError as e:
++ print "I/O error({0}): {1}".format(e.errno, e.strerror)
+ except ValueError:
+ print "Could not convert data to an integer."
+ except:
+@@ -177,7 +185,7 @@
+ ... print type(inst) # the exception instance
+ ... print inst.args # arguments stored in .args
+ ... print inst # __str__ allows args to printed directly
+- ... x, y = inst # __getitem__ allows args to be unpacked directly
++ ... x, y = inst.args
+ ... print 'x =', x
+ ... print 'y =', y
+ ...
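A short session showing why the parentheses around the tuple of exception types matter (illustrative only; the error message shown is CPython 2.7's)::

    >>> try:
    ...     int('spam')
    ... except (ValueError, TypeError) as e:   # the parentheses make this a tuple of types
    ...     print 'caught:', e
    ...
    caught: invalid literal for int() with base 10: 'spam'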
+diff -r 70274d53c1dd Doc/tutorial/inputoutput.rst
+--- a/Doc/tutorial/inputoutput.rst
++++ b/Doc/tutorial/inputoutput.rst
+@@ -37,7 +37,7 @@
+ The :func:`str` function is meant to return representations of values which are
+ fairly human-readable, while :func:`repr` is meant to generate representations
+ which can be read by the interpreter (or will force a :exc:`SyntaxError` if
+-there is not equivalent syntax). For objects which don't have a particular
++there is no equivalent syntax). For objects which don't have a particular
+ representation for human consumption, :func:`str` will return the same value as
+ :func:`repr`. Many values, such as numbers or structures like lists and
+ dictionaries, have the same representation using either function. Strings and
+diff -r 70274d53c1dd Doc/tutorial/modules.rst
+--- a/Doc/tutorial/modules.rst
++++ b/Doc/tutorial/modules.rst
+@@ -242,7 +242,7 @@
+ are not part of the core of the language but are nevertheless built in, either
+ for efficiency or to provide access to operating system primitives such as
+ system calls. The set of such modules is a configuration option which also
+-depends on the underlying platform For example, the :mod:`winreg` module is only
++depends on the underlying platform. For example, the :mod:`winreg` module is only
+ provided on Windows systems. One particular module deserves some attention:
+ :mod:`sys`, which is built into every Python interpreter. The variables
+ ``sys.ps1`` and ``sys.ps2`` define the strings used as primary and secondary
+diff -r 70274d53c1dd Doc/using/cmdline.rst
+--- a/Doc/using/cmdline.rst
++++ b/Doc/using/cmdline.rst
+@@ -541,7 +541,8 @@
+ .. envvar:: PYTHONDONTWRITEBYTECODE
+
+ If this is set, Python won't try to write ``.pyc`` or ``.pyo`` files on the
+- import of source modules.
++ import of source modules. This is equivalent to specifying the :option:`-B`
++ option.
+
+ .. versionadded:: 2.6
+
+diff -r 70274d53c1dd Doc/using/unix.rst
+--- a/Doc/using/unix.rst
++++ b/Doc/using/unix.rst
+@@ -145,7 +145,7 @@
+ * http://sourceforge.net/projects/python-mode
+
+ Geany is an excellent IDE with support for a lot of languages. For more
+-information, read: http://geany.uvena.de/
++information, read: http://www.geany.org/
+
+ Komodo edit is another extremely good IDE. It also has support for a lot of
+ languages. For more information, read:
+diff -r 70274d53c1dd Include/node.h
+--- a/Include/node.h
++++ b/Include/node.h
+@@ -20,6 +20,9 @@
+ PyAPI_FUNC(int) PyNode_AddChild(node *n, int type,
+ char *str, int lineno, int col_offset);
+ PyAPI_FUNC(void) PyNode_Free(node *n);
++#ifndef Py_LIMITED_API
++Py_ssize_t _PyNode_SizeOf(node *n);
++#endif
+
+ /* Node access functions */
+ #define NCH(n) ((n)->n_nchildren)
+diff -r 70274d53c1dd Include/patchlevel.h
+--- a/Include/patchlevel.h
++++ b/Include/patchlevel.h
+@@ -6,7 +6,7 @@
+ defined(PY_MAJOR_VERSION).
+
+ When the major or minor version changes, the VERSION variable in
+- configure.in must also be changed.
++ configure.ac must also be changed.
+
+ There is also (independent) API version information in modsupport.h.
+ */
+diff -r 70274d53c1dd Include/pyport.h
+--- a/Include/pyport.h
++++ b/Include/pyport.h
+@@ -549,6 +549,30 @@
+ _Py_set_387controlword(old_387controlword)
+ #endif
+
++/* get and set x87 control word for VisualStudio/x86 */
++#if defined(_MSC_VER) && !defined(_WIN64) /* x87 not supported in 64-bit */
++#define HAVE_PY_SET_53BIT_PRECISION 1
++#define _Py_SET_53BIT_PRECISION_HEADER \
++ unsigned int old_387controlword, new_387controlword, out_387controlword
++/* We use the __control87_2 function to set only the x87 control word.
++ The SSE control word is unaffected. */
++#define _Py_SET_53BIT_PRECISION_START \
++ do { \
++ __control87_2(0, 0, &old_387controlword, NULL); \
++ new_387controlword = \
++ (old_387controlword & ~(_MCW_PC | _MCW_RC)) | (_PC_53 | _RC_NEAR); \
++ if (new_387controlword != old_387controlword) \
++ __control87_2(new_387controlword, _MCW_PC | _MCW_RC, \
++ &out_387controlword, NULL); \
++ } while (0)
++#define _Py_SET_53BIT_PRECISION_END \
++ do { \
++ if (new_387controlword != old_387controlword) \
++ __control87_2(old_387controlword, _MCW_PC | _MCW_RC, \
++ &out_387controlword, NULL); \
++ } while (0)
++#endif
++
+ /* default definitions are empty */
+ #ifndef HAVE_PY_SET_53BIT_PRECISION
+ #define _Py_SET_53BIT_PRECISION_HEADER
+diff -r 70274d53c1dd Lib/BaseHTTPServer.py
+--- a/Lib/BaseHTTPServer.py
++++ b/Lib/BaseHTTPServer.py
+@@ -447,13 +447,13 @@
+ specified as subsequent arguments (it's just like
+ printf!).
+
+- The client host and current date/time are prefixed to
+- every message.
++ The client ip address and current date/time are prefixed to every
++ message.
+
+ """
+
+ sys.stderr.write("%s - - [%s] %s\n" %
+- (self.address_string(),
++ (self.client_address[0],
+ self.log_date_time_string(),
+ format%args))
+
+diff -r 70274d53c1dd Lib/CGIHTTPServer.py
+--- a/Lib/CGIHTTPServer.py
++++ b/Lib/CGIHTTPServer.py
+@@ -84,9 +84,11 @@
+ path begins with one of the strings in self.cgi_directories
+ (and the next character is a '/' or the end of the string).
+ """
+- splitpath = _url_collapse_path_split(self.path)
+- if splitpath[0] in self.cgi_directories:
+- self.cgi_info = splitpath
++ collapsed_path = _url_collapse_path(self.path)
++ dir_sep = collapsed_path.find('/', 1)
++ head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
++ if head in self.cgi_directories:
++ self.cgi_info = head, tail
+ return True
+ return False
+
+@@ -298,44 +300,46 @@
+ self.log_message("CGI script exited OK")
+
+
+-# TODO(gregory.p.smith): Move this into an appropriate library.
+-def _url_collapse_path_split(path):
++def _url_collapse_path(path):
+ """
+ Given a URL path, remove extra '/'s and '.' path elements and collapse
+- any '..' references.
++any '..' references, returning the collapsed path.
+
+ Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.
++ The utility of this function is limited to the is_cgi method and helps
++ prevent some security attacks.
+
+ Returns: A tuple of (head, tail) where tail is everything after the final /
+ and head is everything before it. Head will always start with a '/' and,
+ if it contains anything else, never have a trailing '/'.
+
+ Raises: IndexError if too many '..' occur within the path.
++
+ """
+ # Similar to os.path.split(os.path.normpath(path)) but specific to URL
+ # path semantics rather than local operating system semantics.
+- path_parts = []
+- for part in path.split('/'):
+- if part == '.':
+- path_parts.append('')
+- else:
+- path_parts.append(part)
+- # Filter out blank non trailing parts before consuming the '..'.
+- path_parts = [part for part in path_parts[:-1] if part] + path_parts[-1:]
++ path_parts = path.split('/')
++ head_parts = []
++ for part in path_parts[:-1]:
++ if part == '..':
++ head_parts.pop() # IndexError if more '..' than prior parts
++ elif part and part != '.':
++ head_parts.append( part )
+ if path_parts:
+ tail_part = path_parts.pop()
++ if tail_part:
++ if tail_part == '..':
++ head_parts.pop()
++ tail_part = ''
++ elif tail_part == '.':
++ tail_part = ''
+ else:
+ tail_part = ''
+- head_parts = []
+- for part in path_parts:
+- if part == '..':
+- head_parts.pop()
+- else:
+- head_parts.append(part)
+- if tail_part and tail_part == '..':
+- head_parts.pop()
+- tail_part = ''
+- return ('/' + '/'.join(head_parts), tail_part)
++
++ splitpath = ('/' + '/'.join(head_parts), tail_part)
++ collapsed_path = "/".join(splitpath)
++
++ return collapsed_path
+
+
+ nobody = None
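A rough, self-contained sketch of the collapsing behaviour the rewritten helper aims for (an approximation for illustration only; the function name collapse is invented here, and unlike the real _url_collapse_path it does not keep a separate head/tail split)::

    def collapse(path):
        # drop empty and '.' components; let '..' consume the previous one
        parts = []
        for part in path.split('/'):
            if part in ('', '.'):
                continue
            elif part == '..':
                parts.pop()        # IndexError if there are too many '..'
            else:
                parts.append(part)
        return '/' + '/'.join(parts)

    assert collapse('//cgi-bin/.//script.py') == '/cgi-bin/script.py'
    assert collapse('/cgi-bin/../htbin/./run.cgi') == '/htbin/run.cgi'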
+diff -r 70274d53c1dd Lib/Cookie.py
+--- a/Lib/Cookie.py
++++ b/Lib/Cookie.py
+@@ -390,7 +390,7 @@
+ from time import gmtime, time
+ now = time()
+ year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
+- return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
++ return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
+ (weekdayname[wd], day, monthname[month], year, hh, mm, ss)
+
+
+@@ -539,7 +539,7 @@
+ r"(?P<val>" # Start of group 'val'
+ r'"(?:[^\\"]|\\.)*"' # Any doublequoted string
+ r"|" # or
+- r"\w{3},\s[\w\d-]{9,11}\s[\d:]{8}\sGMT" # Special case for "expires" attr
++ r"\w{3},\s[\s\w\d-]{9,11}\s[\d:]{8}\sGMT" # Special case for "expires" attr
+ r"|" # or
+ ""+ _LegalCharsPatt +"*" # Any word or empty string
+ r")" # End of group 'val'
+diff -r 70274d53c1dd Lib/HTMLParser.py
+--- a/Lib/HTMLParser.py
++++ b/Lib/HTMLParser.py
+@@ -22,13 +22,13 @@
+ starttagopen = re.compile('<[a-zA-Z]')
+ piclose = re.compile('>')
+ commentclose = re.compile(r'--\s*>')
+-tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
++tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
+ # see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
+ # and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
+ tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
+
+ attrfind = re.compile(
+- r'[\s/]*((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
++ r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
+ r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
+
+ locatestarttagend = re.compile(r"""
+@@ -289,7 +289,7 @@
+ match = tagfind.match(rawdata, i+1)
+ assert match, 'unexpected call to parse_starttag()'
+ k = match.end()
+- self.lasttag = tag = rawdata[i+1:k].lower()
++ self.lasttag = tag = match.group(1).lower()
+
+ while k < endpos:
+ m = attrfind.match(rawdata, k)
+diff -r 70274d53c1dd Lib/SimpleXMLRPCServer.py
+--- a/Lib/SimpleXMLRPCServer.py
++++ b/Lib/SimpleXMLRPCServer.py
+@@ -1,4 +1,4 @@
+-"""Simple XML-RPC Server.
++r"""Simple XML-RPC Server.
+
+ This module can be used to create simple XML-RPC servers
+ by creating a server and either installing functions, a
+diff -r 70274d53c1dd Lib/SocketServer.py
+--- a/Lib/SocketServer.py
++++ b/Lib/SocketServer.py
+@@ -133,6 +133,7 @@
+ import select
+ import sys
+ import os
++import errno
+ try:
+ import threading
+ except ImportError:
+@@ -147,6 +148,15 @@
+ "ThreadingUnixStreamServer",
+ "ThreadingUnixDatagramServer"])
+
++def _eintr_retry(func, *args):
++ """restart a system call interrupted by EINTR"""
++ while True:
++ try:
++ return func(*args)
++ except (OSError, select.error) as e:
++ if e.args[0] != errno.EINTR:
++ raise
++
+ class BaseServer:
+
+ """Base class for server classes.
+@@ -222,7 +232,8 @@
+ # connecting to the socket to wake this up instead of
+ # polling. Polling reduces our responsiveness to a
+ # shutdown request and wastes cpu at all other times.
+- r, w, e = select.select([self], [], [], poll_interval)
++ r, w, e = _eintr_retry(select.select, [self], [], [],
++ poll_interval)
+ if self in r:
+ self._handle_request_noblock()
+ finally:
+@@ -262,7 +273,7 @@
+ timeout = self.timeout
+ elif self.timeout is not None:
+ timeout = min(timeout, self.timeout)
+- fd_sets = select.select([self], [], [], timeout)
++ fd_sets = _eintr_retry(select.select, [self], [], [], timeout)
+ if not fd_sets[0]:
+ self.handle_timeout()
+ return
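The _eintr_retry helper added above follows a common pattern; the same idea in isolation (names here are illustrative)::

    import errno
    import select

    def eintr_retry(func, *args):
        """Call func(*args) again whenever it is interrupted by a signal."""
        while True:
            try:
                return func(*args)
            except (OSError, select.error) as e:
                if e.args[0] != errno.EINTR:
                    raise

    # typical use: wait on a socket without dying on a stray signal, e.g.
    # readable, _, _ = eintr_retry(select.select, [sock], [], [], 0.5)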
+diff -r 70274d53c1dd Lib/__future__.py
+--- a/Lib/__future__.py
++++ b/Lib/__future__.py
+@@ -112,7 +112,7 @@
+ CO_FUTURE_DIVISION)
+
+ absolute_import = _Feature((2, 5, 0, "alpha", 1),
+- (2, 7, 0, "alpha", 0),
++ (3, 0, 0, "alpha", 0),
+ CO_FUTURE_ABSOLUTE_IMPORT)
+
+ with_statement = _Feature((2, 5, 0, "alpha", 1),
+diff -r 70274d53c1dd Lib/_pyio.py
+--- a/Lib/_pyio.py
++++ b/Lib/_pyio.py
+@@ -1451,7 +1451,7 @@
+ enabled. With this enabled, on input, the lines endings '\n', '\r',
+ or '\r\n' are translated to '\n' before being returned to the
+ caller. Conversely, on output, '\n' is translated to the system
+- default line seperator, os.linesep. If newline is any other of its
++ default line separator, os.linesep. If newline is any other of its
+ legal values, that newline becomes the newline when the file is read
+ and it is returned untranslated. On output, '\n' is converted to the
+ newline.
+diff -r 70274d53c1dd Lib/_strptime.py
+--- a/Lib/_strptime.py
++++ b/Lib/_strptime.py
+@@ -326,7 +326,8 @@
+ if len(data_string) != found.end():
+ raise ValueError("unconverted data remains: %s" %
+ data_string[found.end():])
+- year = 1900
++
++ year = None
+ month = day = 1
+ hour = minute = second = fraction = 0
+ tz = -1
+@@ -425,6 +426,12 @@
+ else:
+ tz = value
+ break
++ leap_year_fix = False
++ if year is None and month == 2 and day == 29:
++ year = 1904 # 1904 is first leap year of 20th century
++ leap_year_fix = True
++ elif year is None:
++ year = 1900
+ # If we know the week of the year and what day of that week, we can figure
+ # out the Julian day of the year.
+ if julian == -1 and week_of_year != -1 and weekday != -1:
+@@ -446,6 +453,12 @@
+ day = datetime_result.day
+ if weekday == -1:
+ weekday = datetime_date(year, month, day).weekday()
++ if leap_year_fix:
++ # the caller didn't supply a year but asked for Feb 29th. We couldn't
++ # use the default of 1900 for computations. We set it back to ensure
++ # that February 29th is smaller than March 1st.
++ year = 1900
++
+ return (time.struct_time((year, month, day,
+ hour, minute, second,
+ weekday, julian, tz)), fraction)
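With this change a format string that omits the year no longer chokes on February 29th; roughly (assuming the C locale, and noting that the returned year still defaults to 1900)::

    >>> import time
    >>> time.strptime('Feb 29', '%b %d')[:3]
    (1900, 2, 29)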
+diff -r 70274d53c1dd Lib/_weakrefset.py
+--- a/Lib/_weakrefset.py
++++ b/Lib/_weakrefset.py
+@@ -63,7 +63,7 @@
+ yield item
+
+ def __len__(self):
+- return sum(x() is not None for x in self.data)
++ return len(self.data) - len(self._pending_removals)
+
+ def __contains__(self, item):
+ try:
+@@ -116,36 +116,21 @@
+ def update(self, other):
+ if self._pending_removals:
+ self._commit_removals()
+- if isinstance(other, self.__class__):
+- self.data.update(other.data)
+- else:
+- for element in other:
+- self.add(element)
++ for element in other:
++ self.add(element)
+
+ def __ior__(self, other):
+ self.update(other)
+ return self
+
+- # Helper functions for simple delegating methods.
+- def _apply(self, other, method):
+- if not isinstance(other, self.__class__):
+- other = self.__class__(other)
+- newdata = method(other.data)
+- newset = self.__class__()
+- newset.data = newdata
++ def difference(self, other):
++ newset = self.copy()
++ newset.difference_update(other)
+ return newset
+-
+- def difference(self, other):
+- return self._apply(other, self.data.difference)
+ __sub__ = difference
+
+ def difference_update(self, other):
+- if self._pending_removals:
+- self._commit_removals()
+- if self is other:
+- self.data.clear()
+- else:
+- self.data.difference_update(ref(item) for item in other)
++ self.__isub__(other)
+ def __isub__(self, other):
+ if self._pending_removals:
+ self._commit_removals()
+@@ -156,13 +141,11 @@
+ return self
+
+ def intersection(self, other):
+- return self._apply(other, self.data.intersection)
++ return self.__class__(item for item in other if item in self)
+ __and__ = intersection
+
+ def intersection_update(self, other):
+- if self._pending_removals:
+- self._commit_removals()
+- self.data.intersection_update(ref(item) for item in other)
++ self.__iand__(other)
+ def __iand__(self, other):
+ if self._pending_removals:
+ self._commit_removals()
+@@ -171,17 +154,17 @@
+
+ def issubset(self, other):
+ return self.data.issubset(ref(item) for item in other)
+- __lt__ = issubset
++ __le__ = issubset
+
+- def __le__(self, other):
+- return self.data <= set(ref(item) for item in other)
++ def __lt__(self, other):
++ return self.data < set(ref(item) for item in other)
+
+ def issuperset(self, other):
+ return self.data.issuperset(ref(item) for item in other)
+- __gt__ = issuperset
++ __ge__ = issuperset
+
+- def __ge__(self, other):
+- return self.data >= set(ref(item) for item in other)
++ def __gt__(self, other):
++ return self.data > set(ref(item) for item in other)
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+@@ -189,27 +172,24 @@
+ return self.data == set(ref(item) for item in other)
+
+ def symmetric_difference(self, other):
+- return self._apply(other, self.data.symmetric_difference)
++ newset = self.copy()
++ newset.symmetric_difference_update(other)
++ return newset
+ __xor__ = symmetric_difference
+
+ def symmetric_difference_update(self, other):
+- if self._pending_removals:
+- self._commit_removals()
+- if self is other:
+- self.data.clear()
+- else:
+- self.data.symmetric_difference_update(ref(item) for item in other)
++ self.__ixor__(other)
+ def __ixor__(self, other):
+ if self._pending_removals:
+ self._commit_removals()
+ if self is other:
+ self.data.clear()
+ else:
+- self.data.symmetric_difference_update(ref(item) for item in other)
++ self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
+ return self
+
+ def union(self, other):
+- return self._apply(other, self.data.union)
++ return self.__class__(e for s in (self, other) for e in s)
+ __or__ = union
+
+ def isdisjoint(self, other):
+diff -r 70274d53c1dd Lib/argparse.py
+--- a/Lib/argparse.py
++++ b/Lib/argparse.py
+@@ -740,10 +740,10 @@
+
+ - default -- The value to be produced if the option is not specified.
+
+- - type -- The type which the command-line arguments should be converted
+- to, should be one of 'string', 'int', 'float', 'complex' or a
+- callable object that accepts a single string argument. If None,
+- 'string' is assumed.
++ - type -- A callable that accepts a single string argument, and
++ returns the converted value. The standard Python types str, int,
++ float, and complex are useful examples of such callables. If None,
++ str is used.
+
+ - choices -- A container of values that should be allowed. If not None,
+ after a command-line argument has been converted to the appropriate
+@@ -1967,7 +1967,7 @@
+ for arg_string in arg_strings:
+
+ # for regular arguments, just add them back into the list
+- if arg_string[0] not in self.fromfile_prefix_chars:
++ if not arg_string or arg_string[0] not in self.fromfile_prefix_chars:
+ new_arg_strings.append(arg_string)
+
+ # replace arguments referencing files with the file content
+@@ -2174,9 +2174,12 @@
+ # Value conversion methods
+ # ========================
+ def _get_values(self, action, arg_strings):
+- # for everything but PARSER args, strip out '--'
++ # for everything but PARSER, REMAINDER args, strip out first '--'
+ if action.nargs not in [PARSER, REMAINDER]:
+- arg_strings = [s for s in arg_strings if s != '--']
++ try:
++ arg_strings.remove('--')
++ except ValueError:
++ pass
+
+ # optional argument produces a default when not present
+ if not arg_strings and action.nargs == OPTIONAL:
+diff -r 70274d53c1dd Lib/asyncore.py
+--- a/Lib/asyncore.py
++++ b/Lib/asyncore.py
+@@ -225,6 +225,7 @@
+ debug = False
+ connected = False
+ accepting = False
++ connecting = False
+ closing = False
+ addr = None
+ ignore_log_types = frozenset(['warning'])
+@@ -248,7 +249,7 @@
+ try:
+ self.addr = sock.getpeername()
+ except socket.error, err:
+- if err.args[0] == ENOTCONN:
++ if err.args[0] in (ENOTCONN, EINVAL):
+ # To handle the case where we got an unconnected
+ # socket.
+ self.connected = False
+@@ -342,9 +343,11 @@
+
+ def connect(self, address):
+ self.connected = False
++ self.connecting = True
+ err = self.socket.connect_ex(address)
+ if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
+ or err == EINVAL and os.name in ('nt', 'ce'):
++ self.addr = address
+ return
+ if err in (0, EISCONN):
+ self.addr = address
+@@ -400,6 +403,7 @@
+ def close(self):
+ self.connected = False
+ self.accepting = False
++ self.connecting = False
+ self.del_channel()
+ try:
+ self.socket.close()
+@@ -438,7 +442,8 @@
+ # sockets that are connected
+ self.handle_accept()
+ elif not self.connected:
+- self.handle_connect_event()
++ if self.connecting:
++ self.handle_connect_event()
+ self.handle_read()
+ else:
+ self.handle_read()
+@@ -449,6 +454,7 @@
+ raise socket.error(err, _strerror(err))
+ self.handle_connect()
+ self.connected = True
++ self.connecting = False
+
+ def handle_write_event(self):
+ if self.accepting:
+@@ -457,12 +463,8 @@
+ return
+
+ if not self.connected:
+- #check for errors
+- err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+- if err != 0:
+- raise socket.error(err, _strerror(err))
+-
+- self.handle_connect_event()
++ if self.connecting:
++ self.handle_connect_event()
+ self.handle_write()
+
+ def handle_expt_event(self):
+diff -r 70274d53c1dd Lib/bdb.py
+--- a/Lib/bdb.py
++++ b/Lib/bdb.py
+@@ -24,6 +24,7 @@
+ self.skip = set(skip) if skip else None
+ self.breaks = {}
+ self.fncache = {}
++ self.frame_returning = None
+
+ def canonic(self, filename):
+ if filename == "<" + filename[1:-1] + ">":
+@@ -82,7 +83,11 @@
+
+ def dispatch_return(self, frame, arg):
+ if self.stop_here(frame) or frame == self.returnframe:
+- self.user_return(frame, arg)
++ try:
++ self.frame_returning = frame
++ self.user_return(frame, arg)
++ finally:
++ self.frame_returning = None
+ if self.quitting: raise BdbQuit
+ return self.trace_dispatch
+
+@@ -186,6 +191,14 @@
+
+ def set_step(self):
+ """Stop after one line of code."""
++ # Issue #13183: pdb skips frames after hitting a breakpoint and running
++ # step commands.
++ # Restore the trace function in the caller (that may not have been set
++ # for performance reasons) when returning from the current frame.
++ if self.frame_returning:
++ caller_frame = self.frame_returning.f_back
++ if caller_frame and not caller_frame.f_trace:
++ caller_frame.f_trace = self.trace_dispatch
+ self._set_stopinfo(None, None)
+
+ def set_next(self, frame):
+diff -r 70274d53c1dd Lib/cmd.py
+--- a/Lib/cmd.py
++++ b/Lib/cmd.py
+@@ -294,6 +294,7 @@
+ return list(commands | topics)
+
+ def do_help(self, arg):
++ 'List available commands with "help" or detailed help with "help cmd".'
+ if arg:
+ # XXX check arg syntax
+ try:
+diff -r 70274d53c1dd Lib/collections.py
+--- a/Lib/collections.py
++++ b/Lib/collections.py
+@@ -234,10 +234,60 @@
+ ### namedtuple
+ ################################################################################
+
++_class_template = '''\
++class {typename}(tuple):
++ '{typename}({arg_list})'
++
++ __slots__ = ()
++
++ _fields = {field_names!r}
++
++ def __new__(_cls, {arg_list}):
++ 'Create new instance of {typename}({arg_list})'
++ return _tuple.__new__(_cls, ({arg_list}))
++
++ @classmethod
++ def _make(cls, iterable, new=tuple.__new__, len=len):
++ 'Make a new {typename} object from a sequence or iterable'
++ result = new(cls, iterable)
++ if len(result) != {num_fields:d}:
++ raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
++ return result
++
++ def __repr__(self):
++ 'Return a nicely formatted representation string'
++ return '{typename}({repr_fmt})' % self
++
++ def _asdict(self):
++ 'Return a new OrderedDict which maps field names to their values'
++ return OrderedDict(zip(self._fields, self))
++
++ __dict__ = property(_asdict)
++
++ def _replace(_self, **kwds):
++ 'Return a new {typename} object replacing specified fields with new values'
++ result = _self._make(map(kwds.pop, {field_names!r}, _self))
++ if kwds:
++ raise ValueError('Got unexpected field names: %r' % kwds.keys())
++ return result
++
++ def __getnewargs__(self):
++ 'Return self as a plain tuple. Used by copy and pickle.'
++ return tuple(self)
++
++{field_defs}
++'''
++
++_repr_template = '{name}=%r'
++
++_field_template = '''\
++ {name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
++'''
++
+ def namedtuple(typename, field_names, verbose=False, rename=False):
+ """Returns a new subclass of tuple with named fields.
+
+- >>> Point = namedtuple('Point', 'x y')
++ >>> Point = namedtuple('Point', ['x', 'y'])
+ >>> Point.__doc__ # docstring for the new class
+ 'Point(x, y)'
+ >>> p = Point(11, y=22) # instantiate with positional args or keywords
+@@ -258,83 +308,63 @@
+
+ """
+
+- # Parse and validate the field names. Validation serves two purposes,
+- # generating informative error messages and preventing template injection attacks.
++ # Validate the field names. At the user's option, either generate an error
++ # message or automatically replace the field name with a valid name.
+ if isinstance(field_names, basestring):
+- field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
+- field_names = tuple(map(str, field_names))
++ field_names = field_names.replace(',', ' ').split()
++ field_names = map(str, field_names)
+ if rename:
+- names = list(field_names)
+ seen = set()
+- for i, name in enumerate(names):
+- if (not all(c.isalnum() or c=='_' for c in name) or _iskeyword(name)
+- or not name or name[0].isdigit() or name.startswith('_')
++ for index, name in enumerate(field_names):
++ if (not all(c.isalnum() or c=='_' for c in name)
++ or _iskeyword(name)
++ or not name
++ or name[0].isdigit()
++ or name.startswith('_')
+ or name in seen):
+- names[i] = '_%d' % i
++ field_names[index] = '_%d' % index
+ seen.add(name)
+- field_names = tuple(names)
+- for name in (typename,) + field_names:
++ for name in [typename] + field_names:
+ if not all(c.isalnum() or c=='_' for c in name):
+- raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
++ raise ValueError('Type names and field names can only contain '
++ 'alphanumeric characters and underscores: %r' % name)
+ if _iskeyword(name):
+- raise ValueError('Type names and field names cannot be a keyword: %r' % name)
++ raise ValueError('Type names and field names cannot be a '
++ 'keyword: %r' % name)
+ if name[0].isdigit():
+- raise ValueError('Type names and field names cannot start with a number: %r' % name)
+- seen_names = set()
++ raise ValueError('Type names and field names cannot start with '
++ 'a number: %r' % name)
++ seen = set()
+ for name in field_names:
+ if name.startswith('_') and not rename:
+- raise ValueError('Field names cannot start with an underscore: %r' % name)
+- if name in seen_names:
++ raise ValueError('Field names cannot start with an underscore: '
++ '%r' % name)
++ if name in seen:
+ raise ValueError('Encountered duplicate field name: %r' % name)
+- seen_names.add(name)
++ seen.add(name)
+
+- # Create and fill-in the class template
+- numfields = len(field_names)
+- argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
+- reprtxt = ', '.join('%s=%%r' % name for name in field_names)
+- template = '''class %(typename)s(tuple):
+- '%(typename)s(%(argtxt)s)' \n
+- __slots__ = () \n
+- _fields = %(field_names)r \n
+- def __new__(_cls, %(argtxt)s):
+- 'Create new instance of %(typename)s(%(argtxt)s)'
+- return _tuple.__new__(_cls, (%(argtxt)s)) \n
+- @classmethod
+- def _make(cls, iterable, new=tuple.__new__, len=len):
+- 'Make a new %(typename)s object from a sequence or iterable'
+- result = new(cls, iterable)
+- if len(result) != %(numfields)d:
+- raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
+- return result \n
+- def __repr__(self):
+- 'Return a nicely formatted representation string'
+- return '%(typename)s(%(reprtxt)s)' %% self \n
+- def _asdict(self):
+- 'Return a new OrderedDict which maps field names to their values'
+- return OrderedDict(zip(self._fields, self)) \n
+- __dict__ = property(_asdict) \n
+- def _replace(_self, **kwds):
+- 'Return a new %(typename)s object replacing specified fields with new values'
+- result = _self._make(map(kwds.pop, %(field_names)r, _self))
+- if kwds:
+- raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
+- return result \n
+- def __getnewargs__(self):
+- 'Return self as a plain tuple. Used by copy and pickle.'
+- return tuple(self) \n\n''' % locals()
+- for i, name in enumerate(field_names):
+- template += " %s = _property(_itemgetter(%d), doc='Alias for field number %d')\n" % (name, i, i)
++ # Fill-in the class template
++ class_definition = _class_template.format(
++ typename = typename,
++ field_names = tuple(field_names),
++ num_fields = len(field_names),
++ arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
++ repr_fmt = ', '.join(_repr_template.format(name=name)
++ for name in field_names),
++ field_defs = '\n'.join(_field_template.format(index=index, name=name)
++ for index, name in enumerate(field_names))
++ )
+ if verbose:
+- print template
++ print class_definition
+
+- # Execute the template string in a temporary namespace and
+- # support tracing utilities by setting a value for frame.f_globals['__name__']
++ # Execute the template string in a temporary namespace and support
++ # tracing utilities by setting a value for frame.f_globals['__name__']
+ namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
+ OrderedDict=OrderedDict, _property=property, _tuple=tuple)
+ try:
+- exec template in namespace
+- except SyntaxError, e:
+- raise SyntaxError(e.message + ':\n' + template)
++ exec class_definition in namespace
++ except SyntaxError as e:
++ raise SyntaxError(e.message + ':\n' + class_definition)
+ result = namespace[typename]
+
+ # For pickling to work, the __module__ variable needs to be set to the frame
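The rename handling above can be exercised directly; a small sketch (the field names are arbitrary)::

    >>> from collections import namedtuple
    >>> Row = namedtuple('Row', ['id', 'class', 'id'], rename=True)
    >>> Row._fields            # keyword and duplicate names replaced by position
    ('id', '_1', '_2')
    >>> Row(1, 2, 3)._asdict()
    OrderedDict([('id', 1), ('_1', 2), ('_2', 3)])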
+diff -r 70274d53c1dd Lib/compiler/consts.py
+--- a/Lib/compiler/consts.py
++++ b/Lib/compiler/consts.py
+@@ -5,7 +5,7 @@
+
+ SC_LOCAL = 1
+ SC_GLOBAL_IMPLICIT = 2
+-SC_GLOBAL_EXPLICT = 3
++SC_GLOBAL_EXPLICIT = 3
+ SC_FREE = 4
+ SC_CELL = 5
+ SC_UNKNOWN = 6
+diff -r 70274d53c1dd Lib/compiler/pycodegen.py
+--- a/Lib/compiler/pycodegen.py
++++ b/Lib/compiler/pycodegen.py
+@@ -7,7 +7,7 @@
+
+ from compiler import ast, parse, walk, syntax
+ from compiler import pyassem, misc, future, symbols
+-from compiler.consts import SC_LOCAL, SC_GLOBAL_IMPLICIT, SC_GLOBAL_EXPLICT, \
++from compiler.consts import SC_LOCAL, SC_GLOBAL_IMPLICIT, SC_GLOBAL_EXPLICIT, \
+ SC_FREE, SC_CELL
+ from compiler.consts import (CO_VARARGS, CO_VARKEYWORDS, CO_NEWLOCALS,
+ CO_NESTED, CO_GENERATOR, CO_FUTURE_DIVISION,
+@@ -283,7 +283,7 @@
+ self.emit(prefix + '_NAME', name)
+ else:
+ self.emit(prefix + '_FAST', name)
+- elif scope == SC_GLOBAL_EXPLICT:
++ elif scope == SC_GLOBAL_EXPLICIT:
+ self.emit(prefix + '_GLOBAL', name)
+ elif scope == SC_GLOBAL_IMPLICIT:
+ if not self.optimized:
+diff -r 70274d53c1dd Lib/compiler/symbols.py
+--- a/Lib/compiler/symbols.py
++++ b/Lib/compiler/symbols.py
+@@ -1,7 +1,7 @@
+ """Module symbol-table generator"""
+
+ from compiler import ast
+-from compiler.consts import SC_LOCAL, SC_GLOBAL_IMPLICIT, SC_GLOBAL_EXPLICT, \
++from compiler.consts import SC_LOCAL, SC_GLOBAL_IMPLICIT, SC_GLOBAL_EXPLICIT, \
+ SC_FREE, SC_CELL, SC_UNKNOWN
+ from compiler.misc import mangle
+ import types
+@@ -90,7 +90,7 @@
+ The scope of a name could be LOCAL, GLOBAL, FREE, or CELL.
+ """
+ if name in self.globals:
+- return SC_GLOBAL_EXPLICT
++ return SC_GLOBAL_EXPLICIT
+ if name in self.cells:
+ return SC_CELL
+ if name in self.defs:
+diff -r 70274d53c1dd Lib/cookielib.py
+--- a/Lib/cookielib.py
++++ b/Lib/cookielib.py
+@@ -1,4 +1,4 @@
+-"""HTTP cookie handling for web clients.
++r"""HTTP cookie handling for web clients.
+
+ This module has (now fairly distant) origins in Gisle Aas' Perl module
+ HTTP::Cookies, from the libwww-perl library.
+diff -r 70274d53c1dd Lib/ctypes/test/test_bitfields.py
+--- a/Lib/ctypes/test/test_bitfields.py
++++ b/Lib/ctypes/test/test_bitfields.py
+@@ -240,5 +240,25 @@
+ _anonymous_ = ["_"]
+ _fields_ = [("_", X)]
+
++ @unittest.skipUnless(hasattr(ctypes, "c_uint32"), "c_uint32 is required")
++ def test_uint32(self):
++ class X(Structure):
++ _fields_ = [("a", c_uint32, 32)]
++ x = X()
++ x.a = 10
++ self.assertEquals(x.a, 10)
++ x.a = 0xFDCBA987
++ self.assertEquals(x.a, 0xFDCBA987)
++
++ @unittest.skipUnless(hasattr(ctypes, "c_uint64"), "c_uint64 is required")
++ def test_uint64(self):
++ class X(Structure):
++ _fields_ = [("a", c_uint64, 64)]
++ x = X()
++ x.a = 10
++ self.assertEquals(x.a, 10)
++ x.a = 0xFEDCBA9876543211
++ self.assertEquals(x.a, 0xFEDCBA9876543211)
++
+ if __name__ == "__main__":
+ unittest.main()
+diff -r 70274d53c1dd Lib/ctypes/test/test_numbers.py
+--- a/Lib/ctypes/test/test_numbers.py
++++ b/Lib/ctypes/test/test_numbers.py
+@@ -216,6 +216,16 @@
+ # probably be changed:
+ self.assertRaises(TypeError, c_int, c_long(42))
+
++ def test_float_overflow(self):
++ import sys
++ big_int = int(sys.float_info.max) * 2
++ for t in float_types + [c_longdouble]:
++ self.assertRaises(OverflowError, t, big_int)
++ if (hasattr(t, "__ctype_be__")):
++ self.assertRaises(OverflowError, t.__ctype_be__, big_int)
++ if (hasattr(t, "__ctype_le__")):
++ self.assertRaises(OverflowError, t.__ctype_le__, big_int)
++
+ ## def test_perf(self):
+ ## check_perf()
+
+diff -r 70274d53c1dd Lib/distutils/command/bdist_rpm.py
+--- a/Lib/distutils/command/bdist_rpm.py
++++ b/Lib/distutils/command/bdist_rpm.py
+@@ -379,16 +379,28 @@
+ self.spawn(rpm_cmd)
+
+ if not self.dry_run:
++ if self.distribution.has_ext_modules():
++ pyversion = get_python_version()
++ else:
++ pyversion = 'any'
++
+ if not self.binary_only:
+ srpm = os.path.join(rpm_dir['SRPMS'], source_rpm)
+ assert(os.path.exists(srpm))
+ self.move_file(srpm, self.dist_dir)
++ filename = os.path.join(self.dist_dir, source_rpm)
++ self.distribution.dist_files.append(
++ ('bdist_rpm', pyversion, filename))
+
+ if not self.source_only:
+ for rpm in binary_rpms:
+ rpm = os.path.join(rpm_dir['RPMS'], rpm)
+ if os.path.exists(rpm):
+ self.move_file(rpm, self.dist_dir)
++ filename = os.path.join(self.dist_dir,
++ os.path.basename(rpm))
++ self.distribution.dist_files.append(
++ ('bdist_rpm', pyversion, filename))
+ # run()
+
+ def _dist_path(self, path):
+diff -r 70274d53c1dd Lib/distutils/config.py
+--- a/Lib/distutils/config.py
++++ b/Lib/distutils/config.py
+@@ -42,7 +42,7 @@
+ def _store_pypirc(self, username, password):
+ """Creates a default .pypirc file."""
+ rc = self._get_rc_file()
+- f = open(rc, 'w')
++ f = os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0600), 'w')
+ try:
+ f.write(DEFAULT_PYPIRC % (username, password))
+ finally:
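A stand-alone version of the restrictive-permissions pattern used here (the function name is invented; note the mode only applies when the file is first created)::

    import os

    def write_private(path, data):
        # 0600: owner read/write from the moment the file appears
        fd = os.open(path, os.O_CREAT | os.O_WRONLY, 0600)
        f = os.fdopen(fd, 'w')
        try:
            f.write(data)
        finally:
            f.close()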
+diff -r 70274d53c1dd Lib/distutils/dir_util.py
+--- a/Lib/distutils/dir_util.py
++++ b/Lib/distutils/dir_util.py
+@@ -144,6 +144,10 @@
+ src_name = os.path.join(src, n)
+ dst_name = os.path.join(dst, n)
+
++ if n.startswith('.nfs'):
++ # skip NFS rename files
++ continue
++
+ if preserve_symlinks and os.path.islink(src_name):
+ link_dest = os.readlink(src_name)
+ if verbose >= 1:
+diff -r 70274d53c1dd Lib/distutils/tests/test_bdist_dumb.py
+--- a/Lib/distutils/tests/test_bdist_dumb.py
++++ b/Lib/distutils/tests/test_bdist_dumb.py
+@@ -1,8 +1,10 @@
+ """Tests for distutils.command.bdist_dumb."""
+
++import os
++import sys
++import zipfile
+ import unittest
+-import sys
+-import os
++from test.test_support import run_unittest
+
+ # zlib is not used here, but if it's not available
+ # test_simple_built will fail
+@@ -11,8 +13,6 @@
+ except ImportError:
+ zlib = None
+
+-from test.test_support import run_unittest
+-
+ from distutils.core import Distribution
+ from distutils.command.bdist_dumb import bdist_dumb
+ from distutils.tests import support
+@@ -73,15 +73,23 @@
+
+ # see what we have
+ dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
+- base = "%s.%s" % (dist.get_fullname(), cmd.plat_name)
++ base = "%s.%s.zip" % (dist.get_fullname(), cmd.plat_name)
+ if os.name == 'os2':
+ base = base.replace(':', '-')
+
+- wanted = ['%s.zip' % base]
+- self.assertEqual(dist_created, wanted)
++ self.assertEqual(dist_created, [base])
+
+ # now let's check what we have in the zip file
+- # XXX to be done
++ fp = zipfile.ZipFile(os.path.join('dist', base))
++ try:
++ contents = fp.namelist()
++ finally:
++ fp.close()
++
++ contents = sorted(os.path.basename(fn) for fn in contents)
++ wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2],
++ 'foo.py', 'foo.pyc']
++ self.assertEqual(contents, sorted(wanted))
+
+ def test_finalize_options(self):
+ pkg_dir, dist = self.create_dist()
+diff -r 70274d53c1dd Lib/distutils/tests/test_bdist_msi.py
+--- a/Lib/distutils/tests/test_bdist_msi.py
++++ b/Lib/distutils/tests/test_bdist_msi.py
+@@ -1,12 +1,11 @@
+ """Tests for distutils.command.bdist_msi."""
++import sys
+ import unittest
+-import sys
+-
+ from test.test_support import run_unittest
+-
+ from distutils.tests import support
+
+-@unittest.skipUnless(sys.platform=="win32", "These tests are only for win32")
++
++@unittest.skipUnless(sys.platform == 'win32', 'these tests require Windows')
+ class BDistMSITestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ unittest.TestCase):
+@@ -14,10 +13,11 @@
+ def test_minimal(self):
+ # minimal test XXX need more tests
+ from distutils.command.bdist_msi import bdist_msi
+- pkg_pth, dist = self.create_dist()
++ project_dir, dist = self.create_dist()
+ cmd = bdist_msi(dist)
+ cmd.ensure_finalized()
+
++
+ def test_suite():
+ return unittest.makeSuite(BDistMSITestCase)
+
+diff -r 70274d53c1dd Lib/distutils/tests/test_bdist_rpm.py
+--- a/Lib/distutils/tests/test_bdist_rpm.py
++++ b/Lib/distutils/tests/test_bdist_rpm.py
+@@ -79,6 +79,10 @@
+ dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
+ self.assertTrue('foo-0.1-1.noarch.rpm' in dist_created)
+
++ # bug #2945: upload ignores bdist_rpm files
++ self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files)
++ self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files)
++
+ def test_no_optimize_flag(self):
+
+ # XXX I am unable yet to make this test work without
+@@ -118,6 +122,11 @@
+
+ dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
+ self.assertTrue('foo-0.1-1.noarch.rpm' in dist_created)
++
++ # bug #2945: upload ignores bdist_rpm files
++ self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files)
++ self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files)
++
+ os.remove(os.path.join(pkg_dir, 'dist', 'foo-0.1-1.noarch.rpm'))
+
+ def test_suite():
+diff -r 70274d53c1dd Lib/distutils/tests/test_dir_util.py
+--- a/Lib/distutils/tests/test_dir_util.py
++++ b/Lib/distutils/tests/test_dir_util.py
+@@ -101,6 +101,24 @@
+ remove_tree(self.root_target, verbose=0)
+ remove_tree(self.target2, verbose=0)
+
++ def test_copy_tree_skips_nfs_temp_files(self):
++ mkpath(self.target, verbose=0)
++
++ a_file = os.path.join(self.target, 'ok.txt')
++ nfs_file = os.path.join(self.target, '.nfs123abc')
++ for f in a_file, nfs_file:
++ fh = open(f, 'w')
++ try:
++ fh.write('some content')
++ finally:
++ fh.close()
++
++ copy_tree(self.target, self.target2)
++ self.assertEqual(os.listdir(self.target2), ['ok.txt'])
++
++ remove_tree(self.root_target, verbose=0)
++ remove_tree(self.target2, verbose=0)
++
+ def test_ensure_relative(self):
+ if os.sep == '/':
+ self.assertEqual(ensure_relative('/home/foo'), 'home/foo')
+diff -r 70274d53c1dd Lib/distutils/tests/test_install.py
+--- a/Lib/distutils/tests/test_install.py
++++ b/Lib/distutils/tests/test_install.py
+@@ -86,19 +86,17 @@
+ self.old_expand = os.path.expanduser
+ os.path.expanduser = _expanduser
+
+- try:
+- # this is the actual test
+- self._test_user_site()
+- finally:
++ def cleanup():
+ site.USER_BASE = self.old_user_base
+ site.USER_SITE = self.old_user_site
+ install_module.USER_BASE = self.old_user_base
+ install_module.USER_SITE = self.old_user_site
+ os.path.expanduser = self.old_expand
+
+- def _test_user_site(self):
++ self.addCleanup(cleanup)
++
+ for key in ('nt_user', 'unix_user', 'os2_home'):
+- self.assertTrue(key in INSTALL_SCHEMES)
++ self.assertIn(key, INSTALL_SCHEMES)
+
+ dist = Distribution({'name': 'xx'})
+ cmd = install(dist)
+@@ -106,14 +104,14 @@
+ # making sure the user option is there
+ options = [name for name, short, lable in
+ cmd.user_options]
+- self.assertTrue('user' in options)
++ self.assertIn('user', options)
+
+ # setting a value
+ cmd.user = 1
+
+ # user base and site shouldn't be created yet
+- self.assertTrue(not os.path.exists(self.user_base))
+- self.assertTrue(not os.path.exists(self.user_site))
++ self.assertFalse(os.path.exists(self.user_base))
++ self.assertFalse(os.path.exists(self.user_site))
+
+ # let's run finalize
+ cmd.ensure_finalized()
+@@ -122,8 +120,8 @@
+ self.assertTrue(os.path.exists(self.user_base))
+ self.assertTrue(os.path.exists(self.user_site))
+
+- self.assertTrue('userbase' in cmd.config_vars)
+- self.assertTrue('usersite' in cmd.config_vars)
++ self.assertIn('userbase', cmd.config_vars)
++ self.assertIn('usersite', cmd.config_vars)
+
+ def test_handle_extra_path(self):
+ dist = Distribution({'name': 'xx', 'extra_path': 'path,dirs'})
+@@ -176,15 +174,16 @@
+
+ def test_record(self):
+ install_dir = self.mkdtemp()
+- project_dir, dist = self.create_dist(scripts=['hello'])
+- self.addCleanup(os.chdir, os.getcwd())
++ project_dir, dist = self.create_dist(py_modules=['hello'],
++ scripts=['sayhi'])
+ os.chdir(project_dir)
+- self.write_file('hello', "print('o hai')")
++ self.write_file('hello.py', "def main(): print 'o hai'")
++ self.write_file('sayhi', 'from hello import main; main()')
+
+ cmd = install(dist)
+ dist.command_obj['install'] = cmd
+ cmd.root = install_dir
+- cmd.record = os.path.join(project_dir, 'RECORD')
++ cmd.record = os.path.join(project_dir, 'filelist')
+ cmd.ensure_finalized()
+ cmd.run()
+
+@@ -195,7 +194,7 @@
+ f.close()
+
+ found = [os.path.basename(line) for line in content.splitlines()]
+- expected = ['hello',
++ expected = ['hello.py', 'hello.pyc', 'sayhi',
+ 'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]]
+ self.assertEqual(found, expected)
+
+@@ -203,7 +202,6 @@
+ install_dir = self.mkdtemp()
+ project_dir, dist = self.create_dist(ext_modules=[
+ Extension('xx', ['xxmodule.c'])])
+- self.addCleanup(os.chdir, os.getcwd())
+ os.chdir(project_dir)
+ support.copy_xxmodule_c(project_dir)
+
+@@ -215,7 +213,7 @@
+ dist.command_obj['install'] = cmd
+ dist.command_obj['build_ext'] = buildextcmd
+ cmd.root = install_dir
+- cmd.record = os.path.join(project_dir, 'RECORD')
++ cmd.record = os.path.join(project_dir, 'filelist')
+ cmd.ensure_finalized()
+ cmd.run()
+
+@@ -241,6 +239,7 @@
+ install_module.DEBUG = False
+ self.assertTrue(len(self.logs) > old_logs_len)
+
++
+ def test_suite():
+ return unittest.makeSuite(InstallTestCase)
+
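The move from try/finally to addCleanup follows the usual unittest idiom; a minimal, hypothetical example::

    import os
    import unittest

    class CleanupExample(unittest.TestCase):
        def test_env_override(self):
            os.environ['EXAMPLE_FLAG'] = '1'          # hypothetical variable
            # registered cleanups run after the test, even if it fails
            self.addCleanup(os.environ.pop, 'EXAMPLE_FLAG', None)
            self.assertEqual(os.environ['EXAMPLE_FLAG'], '1')

    if __name__ == '__main__':
        unittest.main()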
+diff -r 70274d53c1dd Lib/distutils/tests/test_sdist.py
+--- a/Lib/distutils/tests/test_sdist.py
++++ b/Lib/distutils/tests/test_sdist.py
+@@ -6,6 +6,7 @@
+ import zipfile
+ from os.path import join
+ from textwrap import dedent
++from test.test_support import captured_stdout, check_warnings, run_unittest
+
+ # zlib is not used here, but if it's not available
+ # the tests that use zipfile may fail
+@@ -21,7 +22,6 @@
+ except ImportError:
+ UID_GID_SUPPORT = False
+
+-from test.test_support import captured_stdout, check_warnings, run_unittest
+
+ from distutils.command.sdist import sdist, show_formats
+ from distutils.core import Distribution
+@@ -91,9 +91,8 @@
+
+ @unittest.skipUnless(zlib, "requires zlib")
+ def test_prune_file_list(self):
+- # this test creates a package with some vcs dirs in it
+- # and launch sdist to make sure they get pruned
+- # on all systems
++ # this test creates a project with some VCS dirs and an NFS rename
++ # file, then launches sdist to check they get pruned on all systems
+
+ # creating VCS directories with some files in them
+ os.mkdir(join(self.tmp_dir, 'somecode', '.svn'))
+@@ -107,6 +106,8 @@
+ self.write_file((self.tmp_dir, 'somecode', '.git',
+ 'ok'), 'xxx')
+
++ self.write_file((self.tmp_dir, 'somecode', '.nfs0001'), 'xxx')
++
+ # now building a sdist
+ dist, cmd = self.get_cmd()
+
+@@ -375,7 +376,7 @@
+ # the following tests make sure there is a nice error message instead
+ # of a traceback when parsing an invalid manifest template
+
+- def _test_template(self, content):
++ def _check_template(self, content):
+ dist, cmd = self.get_cmd()
+ os.chdir(self.tmp_dir)
+ self.write_file('MANIFEST.in', content)
+@@ -386,17 +387,17 @@
+ self.assertEqual(len(warnings), 1)
+
+ def test_invalid_template_unknown_command(self):
+- self._test_template('taunt knights *')
++ self._check_template('taunt knights *')
+
+ def test_invalid_template_wrong_arguments(self):
+ # this manifest command takes one argument
+- self._test_template('prune')
++ self._check_template('prune')
+
+ @unittest.skipIf(os.name != 'nt', 'test relevant for Windows only')
+ def test_invalid_template_wrong_path(self):
+ # on Windows, trailing slashes are not allowed
+ # this used to crash instead of raising a warning: #8286
+- self._test_template('include examples/')
++ self._check_template('include examples/')
+
+ @unittest.skipUnless(zlib, "requires zlib")
+ def test_get_file_list(self):
+diff -r 70274d53c1dd Lib/doctest.py
+--- a/Lib/doctest.py
++++ b/Lib/doctest.py
+@@ -2314,7 +2314,8 @@
+ return "Doctest: " + self._dt_test.name
+
+ class SkipDocTestCase(DocTestCase):
+- def __init__(self):
++ def __init__(self, module):
++ self.module = module
+ DocTestCase.__init__(self, None)
+
+ def setUp(self):
+@@ -2324,7 +2325,10 @@
+ pass
+
+ def shortDescription(self):
+- return "Skipping tests from %s" % module.__name__
++ return "Skipping tests from %s" % self.module.__name__
++
++ __str__ = shortDescription
++
+
+ def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
+ **options):
+@@ -2372,7 +2376,7 @@
+ if not tests and sys.flags.optimize >=2:
+ # Skip doctests when running with -O2
+ suite = unittest.TestSuite()
+- suite.addTest(SkipDocTestCase())
++ suite.addTest(SkipDocTestCase(module))
+ return suite
+ elif not tests:
+ # Why do we want to do this? Because it reveals a bug that might
+diff -r 70274d53c1dd Lib/email/_parseaddr.py
+--- a/Lib/email/_parseaddr.py
++++ b/Lib/email/_parseaddr.py
+@@ -13,7 +13,7 @@
+ 'quote',
+ ]
+
+-import time
++import time, calendar
+
+ SPACE = ' '
+ EMPTYSTRING = ''
+@@ -150,13 +150,13 @@
+
+
+ def mktime_tz(data):
+- """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
++ """Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp."""
+ if data[9] is None:
+ # No zone info, so localtime is better assumption than GMT
+ return time.mktime(data[:8] + (-1,))
+ else:
+- t = time.mktime(data[:8] + (0,))
+- return t - data[9] - time.timezone
++ t = calendar.timegm(data)
++ return t - data[9]
+
+
+ def quote(str):
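Using calendar.timegm makes the result independent of the local timezone; the new test cases translate roughly to::

    >>> from email.utils import mktime_tz, parsedate_tz
    >>> mktime_tz(parsedate_tz('Thu, 01 Jan 1970 00:00:00 +0000'))
    0
    >>> mktime_tz(parsedate_tz('Thu, 01 Jan 1970 01:00:00 +0100'))
    0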
+diff -r 70274d53c1dd Lib/email/generator.py
+--- a/Lib/email/generator.py
++++ b/Lib/email/generator.py
+@@ -212,7 +212,11 @@
+ msg.set_boundary(boundary)
+ # If there's a preamble, write it out, with a trailing CRLF
+ if msg.preamble is not None:
+- print >> self._fp, msg.preamble
++ if self._mangle_from_:
++ preamble = fcre.sub('>From ', msg.preamble)
++ else:
++ preamble = msg.preamble
++ print >> self._fp, preamble
+ # dash-boundary transport-padding CRLF
+ print >> self._fp, '--' + boundary
+ # body-part
+@@ -230,7 +234,11 @@
+ self._fp.write('\n--' + boundary + '--')
+ if msg.epilogue is not None:
+ print >> self._fp
+- self._fp.write(msg.epilogue)
++ if self._mangle_from_:
++ epilogue = fcre.sub('>From ', msg.epilogue)
++ else:
++ epilogue = msg.epilogue
++ self._fp.write(epilogue)
+
+ def _handle_multipart_signed(self, msg):
+ # The contents of signed parts has to stay unmodified in order to keep
+diff -r 70274d53c1dd Lib/email/test/test_email.py
+--- a/Lib/email/test/test_email.py
++++ b/Lib/email/test/test_email.py
+@@ -9,6 +9,7 @@
+ import difflib
+ import unittest
+ import warnings
++import textwrap
+ from cStringIO import StringIO
+
+ import email
+@@ -948,6 +949,28 @@
+ Blah blah blah
+ """)
+
++ def test_mangle_from_in_preamble_and_epilog(self):
++ s = StringIO()
++ g = Generator(s, mangle_from_=True)
++ msg = email.message_from_string(textwrap.dedent("""\
++ From: foo@bar.com
++ Mime-Version: 1.0
++ Content-Type: multipart/mixed; boundary=XXX
++
++ From somewhere unknown
++
++ --XXX
++ Content-Type: text/plain
++
++ foo
++
++ --XXX--
++
++ From somewhere unknowable
++ """))
++ g.flatten(msg)
++ self.assertEqual(len([1 for x in s.getvalue().split('\n')
++ if x.startswith('>From ')]), 2)
+
+
+ # Test the basic MIMEAudio class
+@@ -2262,6 +2285,12 @@
+ eq(time.localtime(t)[:6], timetup[:6])
+ eq(int(time.strftime('%Y', timetup[:9])), 2003)
+
++ def test_mktime_tz(self):
++ self.assertEqual(Utils.mktime_tz((1970, 1, 1, 0, 0, 0,
++ -1, -1, -1, 0)), 0)
++ self.assertEqual(Utils.mktime_tz((1970, 1, 1, 0, 0, 0,
++ -1, -1, -1, 1234)), -1234)
++
+ def test_parsedate_y2k(self):
+ """Test for parsing a date with a two-digit year.
+
+diff -r 70274d53c1dd Lib/glob.py
+--- a/Lib/glob.py
++++ b/Lib/glob.py
+@@ -5,6 +5,14 @@
+ import re
+ import fnmatch
+
++try:
++ _unicode = unicode
++except NameError:
++ # If Python is built without Unicode support, the unicode type
++ # will not exist. Fake one.
++ class _unicode(object):
++ pass
++
+ __all__ = ["glob", "iglob"]
+
+ def glob(pathname):
+@@ -49,7 +57,7 @@
+ def glob1(dirname, pattern):
+ if not dirname:
+ dirname = os.curdir
+- if isinstance(pattern, unicode) and not isinstance(dirname, unicode):
++ if isinstance(pattern, _unicode) and not isinstance(dirname, unicode):
+ dirname = unicode(dirname, sys.getfilesystemencoding() or
+ sys.getdefaultencoding())
+ try:
+diff -r 70274d53c1dd Lib/hashlib.py
+--- a/Lib/hashlib.py
++++ b/Lib/hashlib.py
+@@ -88,7 +88,7 @@
+ except ImportError:
+ pass # no extension module, this hash is unsupported.
+
+- raise ValueError('unsupported hash type %s' % name)
++ raise ValueError('unsupported hash type ' + name)
+
+
+ def __get_openssl_constructor(name):
+diff -r 70274d53c1dd Lib/httplib.py
+--- a/Lib/httplib.py
++++ b/Lib/httplib.py
+@@ -1,4 +1,4 @@
+-"""HTTP/1.1 client library
++r"""HTTP/1.1 client library
+
+ <intro stuff goes here>
+ <other stuff, too>
+@@ -748,7 +748,11 @@
+ line = response.fp.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise LineTooLong("header line")
+- if line == '\r\n': break
++ if not line:
++ # for sites which EOF without sending trailer
++ break
++ if line == '\r\n':
++ break
+
+
+ def connect(self):
+@@ -985,7 +989,7 @@
+
+ self.putrequest(method, url, **skips)
+
+- if body and ('content-length' not in header_names):
++ if body is not None and 'content-length' not in header_names:
+ self._set_content_length(body)
+ for hdr, value in headers.iteritems():
+ self.putheader(hdr, value)
+diff -r 70274d53c1dd Lib/idlelib/CallTipWindow.py
+--- a/Lib/idlelib/CallTipWindow.py
++++ b/Lib/idlelib/CallTipWindow.py
+@@ -22,6 +22,7 @@
+ self.parenline = self.parencol = None
+ self.lastline = None
+ self.hideid = self.checkhideid = None
++ self.checkhide_after_id = None
+
+ def position_window(self):
+ """Check if needs to reposition the window, and if so - do it."""
+@@ -102,7 +103,10 @@
+ self.hidetip()
+ else:
+ self.position_window()
+- self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
++ if self.checkhide_after_id is not None:
++ self.widget.after_cancel(self.checkhide_after_id)
++ self.checkhide_after_id = \
++ self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
+
+ def hide_event(self, event):
+ if not self.tipwindow:
+diff -r 70274d53c1dd Lib/idlelib/CallTips.py
+--- a/Lib/idlelib/CallTips.py
++++ b/Lib/idlelib/CallTips.py
+@@ -71,16 +71,16 @@
+ if not sur_paren:
+ return
+ hp.set_index(sur_paren[0])
+- name = hp.get_expression()
+- if not name or (not evalfuncs and name.find('(') != -1):
++ expression = hp.get_expression()
++ if not expression or (not evalfuncs and expression.find('(') != -1):
+ return
+- arg_text = self.fetch_tip(name)
++ arg_text = self.fetch_tip(expression)
+ if not arg_text:
+ return
+ self.calltip = self._make_calltip_window()
+ self.calltip.showtip(arg_text, sur_paren[0], sur_paren[1])
+
+- def fetch_tip(self, name):
++ def fetch_tip(self, expression):
+ """Return the argument list and docstring of a function or class
+
+ If there is a Python subprocess, get the calltip there. Otherwise,
+@@ -96,23 +96,27 @@
+ """
+ try:
+ rpcclt = self.editwin.flist.pyshell.interp.rpcclt
+- except:
++ except AttributeError:
+ rpcclt = None
+ if rpcclt:
+ return rpcclt.remotecall("exec", "get_the_calltip",
+- (name,), {})
++ (expression,), {})
+ else:
+- entity = self.get_entity(name)
++ entity = self.get_entity(expression)
+ return get_arg_text(entity)
+
+- def get_entity(self, name):
+- "Lookup name in a namespace spanning sys.modules and __main.dict__"
+- if name:
++ def get_entity(self, expression):
++ """Return the object corresponding to expression evaluated
++ in a namespace spanning sys.modules and __main__.__dict__.
++ """
++ if expression:
+ namespace = sys.modules.copy()
+ namespace.update(__main__.__dict__)
+ try:
+- return eval(name, namespace)
+- except (NameError, AttributeError):
++ return eval(expression, namespace)
++ except BaseException:
++ # An uncaught exception closes IDLE, and eval can raise any
++ # exception, especially if user classes are involved.
+ return None
+
+ def _find_constructor(class_ob):
+@@ -127,9 +131,10 @@
+ return None
+
+ def get_arg_text(ob):
+- """Get a string describing the arguments for the given object"""
++ """Get a string describing the arguments for the given object,
++ only if it is callable."""
+ arg_text = ""
+- if ob is not None:
++ if ob is not None and hasattr(ob, '__call__'):
+ arg_offset = 0
+ if type(ob) in (types.ClassType, types.TypeType):
+ # Look for the highest __init__ in the class chain.
+diff -r 70274d53c1dd Lib/idlelib/EditorWindow.py
+--- a/Lib/idlelib/EditorWindow.py
++++ b/Lib/idlelib/EditorWindow.py
+@@ -856,7 +856,7 @@
+ # for each edit window instance, construct the recent files menu
+ for instance in self.top.instance_dict.keys():
+ menu = instance.recent_files_menu
+- menu.delete(1, END) # clear, and rebuild:
++ menu.delete(0, END) # clear, and rebuild:
+ for i, file_name in enumerate(rf_list):
+ file_name = file_name.rstrip() # zap \n
+ # make unicode string to display non-ASCII chars correctly
+diff -r 70274d53c1dd Lib/idlelib/IOBinding.py
+--- a/Lib/idlelib/IOBinding.py
++++ b/Lib/idlelib/IOBinding.py
+@@ -196,29 +196,33 @@
+ self.filename_change_hook()
+
+ def open(self, event=None, editFile=None):
+- if self.editwin.flist:
++ flist = self.editwin.flist
++ # Save in case parent window is closed (ie, during askopenfile()).
++ if flist:
+ if not editFile:
+ filename = self.askopenfile()
+ else:
+ filename=editFile
+ if filename:
+- # If the current window has no filename and hasn't been
+- # modified, we replace its contents (no loss). Otherwise
+- # we open a new window. But we won't replace the
+- # shell window (which has an interp(reter) attribute), which
+- # gets set to "not modified" at every new prompt.
+- try:
+- interp = self.editwin.interp
+- except AttributeError:
+- interp = None
+- if not self.filename and self.get_saved() and not interp:
+- self.editwin.flist.open(filename, self.loadfile)
++ # If editFile is valid and already open, flist.open will
++ # shift focus to its existing window.
++ # If the current window exists and is a fresh unnamed,
++ # unmodified editor window (not an interpreter shell),
++ # pass self.loadfile to flist.open so it will load the file
++ # in the current window (if the file is not already open)
++ # instead of a new window.
++ if (self.editwin and
++ not getattr(self.editwin, 'interp', None) and
++ not self.filename and
++ self.get_saved()):
++ flist.open(filename, self.loadfile)
+ else:
+- self.editwin.flist.open(filename)
++ flist.open(filename)
+ else:
+- self.text.focus_set()
++ if self.text:
++ self.text.focus_set()
+ return "break"
+- #
++
+ # Code for use outside IDLE:
+ if self.get_saved():
+ reply = self.maybesave()
+diff -r 70274d53c1dd Lib/idlelib/NEWS.txt
+--- a/Lib/idlelib/NEWS.txt
++++ b/Lib/idlelib/NEWS.txt
+@@ -1,5 +1,35 @@
++What's New in IDLE 2.7.4?
++=========================
++
++- Issue #15318: Prevent writing to sys.stdin.
++
++- Issue #13532, #15319: Check that arguments to sys.stdout.write are strings.
++
++- Issue #12510: Attempt to get certain tool tips no longer crashes IDLE.
++
++- Issue #10365: File open dialog now works instead of crashing even when
++ parent window is closed while dialog is open.
++
++- Issue #14876: Use user-selected font for highlight configuration.
++
++- Issue #14018: Update checks for unstable system Tcl/Tk versions on OS X
++ to include versions shipped with OS X 10.7 and 10.8 in addition to 10.6.
++
++
++What's New in IDLE 2.7.3?
++=========================
++
++- Issue #14409: IDLE now properly executes commands in the Shell window
++ when it cannot read the normal config files on startup and
++ has to use the built-in default key bindings.
++ There was previously a bug in one of the defaults.
++
++- Issue #3573: IDLE hangs when passing invalid command line args
++ (directory(ies) instead of file(s)).
++
++
+ What's New in IDLE 2.7.2?
+-=======================
++=========================
+
+ *Release date: 29-May-2011*
+
+diff -r 70274d53c1dd Lib/idlelib/PyShell.py
+--- a/Lib/idlelib/PyShell.py
++++ b/Lib/idlelib/PyShell.py
+@@ -11,6 +11,7 @@
+ import threading
+ import traceback
+ import types
++import io
+
+ import linecache
+ from code import InteractiveInterpreter
+@@ -251,8 +252,8 @@
+ def ranges_to_linenumbers(self, ranges):
+ lines = []
+ for index in range(0, len(ranges), 2):
+- lineno = int(float(ranges[index]))
+- end = int(float(ranges[index+1]))
++ lineno = int(float(ranges[index].string))
++ end = int(float(ranges[index+1].string))
+ while lineno < end:
+ lines.append(lineno)
+ lineno += 1
+@@ -313,6 +314,11 @@
+ "console": idleConf.GetHighlight(theme, "console"),
+ })
+
++ def removecolors(self):
++ # Don't remove shell color tags before "iomark"
++ for tag in self.tagdefs:
++ self.tag_remove(tag, "iomark", "end")
++
+ class ModifiedUndoDelegator(UndoDelegator):
+ "Extend base class: forbid insert/delete before the I/O mark"
+
+@@ -417,6 +423,9 @@
+ except socket.timeout, err:
+ self.display_no_subprocess_error()
+ return None
++ # Can't register self.tkconsole.stdin, since run.py wants to
++ # call non-TextIO methods on it (such as getvar)
++ # XXX should be renamed to "console"
+ self.rpcclt.register("stdin", self.tkconsole)
+ self.rpcclt.register("stdout", self.tkconsole.stdout)
+ self.rpcclt.register("stderr", self.tkconsole.stderr)
+@@ -870,13 +879,14 @@
+ self.save_stderr = sys.stderr
+ self.save_stdin = sys.stdin
+ from idlelib import IOBinding
++ self.stdin = PseudoInputFile(self)
+ self.stdout = PseudoFile(self, "stdout", IOBinding.encoding)
+ self.stderr = PseudoFile(self, "stderr", IOBinding.encoding)
+ self.console = PseudoFile(self, "console", IOBinding.encoding)
+ if not use_subprocess:
+ sys.stdout = self.stdout
+ sys.stderr = self.stderr
+- sys.stdin = self
++ sys.stdin = self.stdin
+ #
+ self.history = self.History(self.text)
+ #
+@@ -1260,6 +1270,8 @@
+ self.encoding = encoding
+
+ def write(self, s):
++ if not isinstance(s, (basestring, bytearray)):
++ raise TypeError('must be string, not ' + type(s).__name__)
+ self.shell.write(s, self.tags)
+
+ def writelines(self, lines):
+@@ -1272,6 +1284,15 @@
+ def isatty(self):
+ return True
+
++class PseudoInputFile(object):
++ def __init__(self, shell):
++ self.readline = shell.readline
++ self.isatty = shell.isatty
++
++ def write(self, s):
++ raise io.UnsupportedOperation("not writable")
++ writelines = write
++
+
+ usage_msg = """\
+
+@@ -1412,8 +1433,10 @@
+
+ if enable_edit:
+ if not (cmd or script):
+- for filename in args:
+- flist.open(filename)
++ for filename in args[:]:
++ if flist.open(filename) is None:
++ # filename is actually a directory; ignore it
++ args.remove(filename)
+ if not args:
+ flist.new()
+ if enable_shell:
+@@ -1456,7 +1479,8 @@
+ if tkversionwarning:
+ shell.interp.runcommand(''.join(("print('", tkversionwarning, "')")))
+
+- root.mainloop()
++ while flist.inversedict: # keep IDLE running while files are open.
++ root.mainloop()
+ root.destroy()
+
+ if __name__ == "__main__":
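
The PseudoInputFile added above is the heart of the #15318/#13532/#15319 fixes listed
in the NEWS hunk: sys.stdin keeps delegating readline()/isatty() to the shell, but any
attempt to write to it now fails loudly, and PseudoFile.write() rejects non-string
arguments. A minimal stand-alone sketch of the same idea (the class name here is
illustrative, not IDLE's):

    import io

    class ReadOnlyStdin(object):
        """Delegate readline()/isatty() to a backing shell object, refuse writes."""
        def __init__(self, shell):
            self.readline = shell.readline
            self.isatty = shell.isatty

        def write(self, s):
            # Writing to stdin is an error; mirror the io module's behaviour.
            raise io.UnsupportedOperation("not writable")
        writelines = write
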
+diff -r 70274d53c1dd Lib/idlelib/ReplaceDialog.py
+--- a/Lib/idlelib/ReplaceDialog.py
++++ b/Lib/idlelib/ReplaceDialog.py
+@@ -2,6 +2,8 @@
+
+ from idlelib import SearchEngine
+ from idlelib.SearchDialogBase import SearchDialogBase
++import re
++
+
+ def replace(text):
+ root = text._root()
+@@ -11,6 +13,7 @@
+ dialog = engine._replacedialog
+ dialog.open(text)
+
++
+ class ReplaceDialog(SearchDialogBase):
+
+ title = "Replace Dialog"
+@@ -55,8 +58,22 @@
+
+ def default_command(self, event=None):
+ if self.do_find(self.ok):
+- self.do_replace()
+- self.do_find(0)
++ if self.do_replace(): # Only find next match if replace succeeded.
++ # A bad re can cause it to fail.
++ self.do_find(0)
++
++ def _replace_expand(self, m, repl):
++ """ Helper function for expanding a regular expression
++ in the replace field, if needed. """
++ if self.engine.isre():
++ try:
++ new = m.expand(repl)
++ except re.error:
++ self.engine.report_error(repl, 'Invalid Replace Expression')
++ new = None
++ else:
++ new = repl
++ return new
+
+ def replace_all(self, event=None):
+ prog = self.engine.getprog()
+@@ -86,7 +103,9 @@
+ line, m = res
+ chars = text.get("%d.0" % line, "%d.0" % (line+1))
+ orig = m.group()
+- new = m.expand(repl)
++ new = self._replace_expand(m, repl)
++ if new is None:
++ break
+ i, j = m.span()
+ first = "%d.%d" % (line, i)
+ last = "%d.%d" % (line, j)
+@@ -103,7 +122,6 @@
+ text.undo_block_stop()
+ if first and last:
+ self.show_hit(first, last)
+- self.close()
+
+ def do_find(self, ok=0):
+ if not self.engine.getprog():
+@@ -138,7 +156,9 @@
+ m = prog.match(chars, col)
+ if not prog:
+ return False
+- new = m.expand(self.replvar.get())
++ new = self._replace_expand(m, self.replvar.get())
++ if new is None:
++ return False
+ text.mark_set("insert", first)
+ text.undo_block_start()
+ if m.group():
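
The new _replace_expand() helper exists because Match.expand() raises re.error when
the replacement template is malformed, for example a back-reference to a group the
pattern does not have; previously that exception escaped the dialog. A quick
illustration, independent of IDLE:

    import re

    m = re.search(r"(\d+)", "order 42")
    print(m.expand(r"number \1"))        # -> number 42
    try:
        m.expand(r"number \2")           # the pattern has no group 2
    except re.error as err:
        print("Invalid Replace Expression: %s" % err)
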
+diff -r 70274d53c1dd Lib/idlelib/configDialog.py
+--- a/Lib/idlelib/configDialog.py
++++ b/Lib/idlelib/configDialog.py
+@@ -183,7 +183,7 @@
+ text=' Highlighting Theme ')
+ #frameCustom
+ self.textHighlightSample=Text(frameCustom,relief=SOLID,borderwidth=1,
+- font=('courier',12,''),cursor='hand2',width=21,height=10,
++ font=('courier',12,''),cursor='hand2',width=21,height=11,
+ takefocus=FALSE,highlightthickness=0,wrap=NONE)
+ text=self.textHighlightSample
+ text.bind('<Double-Button-1>',lambda e: 'break')
+@@ -832,8 +832,10 @@
+ fontWeight=tkFont.BOLD
+ else:
+ fontWeight=tkFont.NORMAL
+- self.editFont.config(size=self.fontSize.get(),
++ size=self.fontSize.get()
++ self.editFont.config(size=size,
+ weight=fontWeight,family=fontName)
++ self.textHighlightSample.configure(font=(fontName, size, fontWeight))
+
+ def SetHighlightTarget(self):
+ if self.highlightTarget.get()=='Cursor': #bg not possible
+diff -r 70274d53c1dd Lib/idlelib/configHandler.py
+--- a/Lib/idlelib/configHandler.py
++++ b/Lib/idlelib/configHandler.py
+@@ -595,7 +595,7 @@
+ '<<replace>>': ['<Control-h>'],
+ '<<goto-line>>': ['<Alt-g>'],
+ '<<smart-backspace>>': ['<Key-BackSpace>'],
+- '<<newline-and-indent>>': ['<Key-Return> <Key-KP_Enter>'],
++ '<<newline-and-indent>>': ['<Key-Return>', '<Key-KP_Enter>'],
+ '<<smart-indent>>': ['<Key-Tab>'],
+ '<<indent-region>>': ['<Control-Key-bracketright>'],
+ '<<dedent-region>>': ['<Control-Key-bracketleft>'],
+diff -r 70274d53c1dd Lib/idlelib/macosxSupport.py
+--- a/Lib/idlelib/macosxSupport.py
++++ b/Lib/idlelib/macosxSupport.py
+@@ -37,17 +37,21 @@
+ def tkVersionWarning(root):
+ """
+ Returns a string warning message if the Tk version in use appears to
+- be one known to cause problems with IDLE. The Apple Cocoa-based Tk 8.5
+- that was shipped with Mac OS X 10.6.
++ be one known to cause problems with IDLE.
++ 1. Apple Cocoa-based Tk 8.5.7 shipped with Mac OS X 10.6 is unusable.
++ 2. Apple Cocoa-based Tk 8.5.9 in OS X 10.7 and 10.8 is better but
++ can still crash unexpectedly.
+ """
+
+ if (runningAsOSXApp() and
+- ('AppKit' in root.tk.call('winfo', 'server', '.')) and
+- (root.tk.call('info', 'patchlevel') == '8.5.7') ):
+- return (r"WARNING: The version of Tcl/Tk (8.5.7) in use may"
++ ('AppKit' in root.tk.call('winfo', 'server', '.')) ):
++ patchlevel = root.tk.call('info', 'patchlevel')
++ if patchlevel not in ('8.5.7', '8.5.9'):
++ return False
++ return (r"WARNING: The version of Tcl/Tk ({0}) in use may"
+ r" be unstable.\n"
+ r"Visit http://www.python.org/download/mac/tcltk/"
+- r" for current information.")
++ r" for current information.".format(patchlevel))
+ else:
+ return False
+
+diff -r 70274d53c1dd Lib/idlelib/run.py
+--- a/Lib/idlelib/run.py
++++ b/Lib/idlelib/run.py
+@@ -1,4 +1,5 @@
+ import sys
++import io
+ import linecache
+ import time
+ import socket
+@@ -248,6 +249,43 @@
+ quitting = True
+ thread.interrupt_main()
+
++class _RPCFile(io.TextIOBase):
++ """Wrapper class for the RPC proxy to typecheck arguments
++ that may not support pickling. The base class is there only
++ to support type tests; all implementations come from the remote
++ object."""
++
++ def __init__(self, rpc):
++ super.__setattr__(self, 'rpc', rpc)
++
++ def __getattribute__(self, name):
++ # When accessing the 'rpc' attribute, or 'write', use ours
++ if name in ('rpc', 'write', 'writelines'):
++ return io.TextIOBase.__getattribute__(self, name)
++ # Otherwise, look only into the remote object
++ return getattr(self.rpc, name)
++
++ def __setattr__(self, name, value):
++ return setattr(self.rpc, name, value)
++
++ @staticmethod
++ def _ensure_string(func):
++ def f(self, s):
++ if not isinstance(s, basestring):
++ raise TypeError('must be str, not ' + type(s).__name__)
++ return func(self, s)
++ return f
++
++class _RPCOutputFile(_RPCFile):
++ @_RPCFile._ensure_string
++ def write(self, s):
++ return self.rpc.write(s)
++
++class _RPCInputFile(_RPCFile):
++ @_RPCFile._ensure_string
++ def write(self, s):
++ raise io.UnsupportedOperation("not writable")
++ writelines = write
+
+ class MyHandler(rpc.RPCHandler):
+
+@@ -255,9 +293,10 @@
+ """Override base method"""
+ executive = Executive(self)
+ self.register("exec", executive)
+- sys.stdin = self.console = self.get_remote_proxy("stdin")
+- sys.stdout = self.get_remote_proxy("stdout")
+- sys.stderr = self.get_remote_proxy("stderr")
++ self.console = self.get_remote_proxy("stdin")
++ sys.stdin = _RPCInputFile(self.console)
++ sys.stdout = _RPCOutputFile(self.get_remote_proxy("stdout"))
++ sys.stderr = _RPCOutputFile(self.get_remote_proxy("stderr"))
+ from idlelib import IOBinding
+ sys.stdin.encoding = sys.stdout.encoding = \
+ sys.stderr.encoding = IOBinding.encoding
+diff -r 70274d53c1dd Lib/io.py
+--- a/Lib/io.py
++++ b/Lib/io.py
+@@ -34,15 +34,6 @@
+ """
+ # New I/O library conforming to PEP 3116.
+
+-# XXX edge cases when switching between reading/writing
+-# XXX need to support 1 meaning line-buffered
+-# XXX whenever an argument is None, use the default value
+-# XXX read/write ops should check readable/writable
+-# XXX buffered readinto should work with arbitrary buffer objects
+-# XXX use incremental encoder for text output, at least for UTF-16 and UTF-8-SIG
+-# XXX check writable, readable and seekable in appropriate places
+-
+-
+ __author__ = ("Guido van Rossum <guido@python.org>, "
+ "Mike Verdone <mike.verdone@gmail.com>, "
+ "Mark Russell <mark.russell@zen.co.uk>, "
+diff -r 70274d53c1dd Lib/json/__init__.py
+--- a/Lib/json/__init__.py
++++ b/Lib/json/__init__.py
+@@ -95,7 +95,7 @@
+ "json": "obj"
+ }
+ $ echo '{ 1.2:3.4}' | python -m json.tool
+- Expecting property name: line 1 column 2 (char 2)
++ Expecting property name enclosed in double quotes: line 1 column 2 (char 2)
+ """
+ __version__ = '2.0.9'
+ __all__ = [
+diff -r 70274d53c1dd Lib/json/decoder.py
+--- a/Lib/json/decoder.py
++++ b/Lib/json/decoder.py
+@@ -169,7 +169,8 @@
+ pairs = object_hook(pairs)
+ return pairs, end + 1
+ elif nextchar != '"':
+- raise ValueError(errmsg("Expecting property name", s, end))
++ raise ValueError(errmsg(
++ "Expecting property name enclosed in double quotes", s, end))
+ end += 1
+ while True:
+ key, end = scanstring(s, end, encoding, strict)
+@@ -179,8 +180,7 @@
+ if s[end:end + 1] != ':':
+ end = _w(s, end).end()
+ if s[end:end + 1] != ':':
+- raise ValueError(errmsg("Expecting : delimiter", s, end))
+-
++ raise ValueError(errmsg("Expecting ':' delimiter", s, end))
+ end += 1
+
+ try:
+@@ -209,7 +209,7 @@
+ if nextchar == '}':
+ break
+ elif nextchar != ',':
+- raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
++ raise ValueError(errmsg("Expecting ',' delimiter", s, end - 1))
+
+ try:
+ nextchar = s[end]
+@@ -224,8 +224,8 @@
+
+ end += 1
+ if nextchar != '"':
+- raise ValueError(errmsg("Expecting property name", s, end - 1))
+-
++ raise ValueError(errmsg(
++ "Expecting property name enclosed in double quotes", s, end - 1))
+ if object_pairs_hook is not None:
+ result = object_pairs_hook(pairs)
+ return result, end
+@@ -259,8 +259,7 @@
+ if nextchar == ']':
+ break
+ elif nextchar != ',':
+- raise ValueError(errmsg("Expecting , delimiter", s, end))
+-
++ raise ValueError(errmsg("Expecting ',' delimiter", s, end))
+ try:
+ if s[end] in _ws:
+ end += 1
+diff -r 70274d53c1dd Lib/json/encoder.py
+--- a/Lib/json/encoder.py
++++ b/Lib/json/encoder.py
+@@ -27,8 +27,7 @@
+ ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
+ #ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
+
+-# Assume this produces an infinity on all machines (probably not guaranteed)
+-INFINITY = float('1e66666')
++INFINITY = float('inf')
+ FLOAT_REPR = repr
+
+ def encode_basestring(s):
+diff -r 70274d53c1dd Lib/json/tool.py
+--- a/Lib/json/tool.py
++++ b/Lib/json/tool.py
+@@ -7,7 +7,7 @@
+ "json": "obj"
+ }
+ $ echo '{ 1.2:3.4}' | python -m json.tool
+- Expecting property name: line 1 column 2 (char 2)
++ Expecting property name enclosed in double quotes: line 1 column 2 (char 2)
+
+ """
+ import sys
+diff -r 70274d53c1dd Lib/keyword.py
+--- a/Lib/keyword.py
++++ b/Lib/keyword.py
+@@ -7,7 +7,7 @@
+ To update the symbols in this file, 'cd' to the top directory of
+ the python source tree after building the interpreter and run:
+
+- python Lib/keyword.py
++ ./python Lib/keyword.py
+ """
+
+ __all__ = ["iskeyword", "kwlist"]
+diff -r 70274d53c1dd Lib/lib-tk/Tkinter.py
+--- a/Lib/lib-tk/Tkinter.py
++++ b/Lib/lib-tk/Tkinter.py
+@@ -534,12 +534,19 @@
+
+ The type keyword specifies the form in which the data is
+ to be returned and should be an atom name such as STRING
+- or FILE_NAME. Type defaults to STRING.
++ or FILE_NAME. Type defaults to STRING, except on X11, where the default
++ is to try UTF8_STRING and fall back to STRING.
+
+ This command is equivalent to:
+
+ selection_get(CLIPBOARD)
+ """
++ if 'type' not in kw and self._windowingsystem == 'x11':
++ try:
++ kw['type'] = 'UTF8_STRING'
++ return self.tk.call(('clipboard', 'get') + self._options(kw))
++ except TclError:
++ del kw['type']
+ return self.tk.call(('clipboard', 'get') + self._options(kw))
+
+ def clipboard_clear(self, **kw):
+@@ -621,8 +628,16 @@
+ A keyword parameter selection specifies the name of
+ the selection and defaults to PRIMARY. A keyword
+ parameter displayof specifies a widget on the display
+- to use."""
++ to use. A keyword parameter type specifies the form of data to be
++ fetched, defaulting to STRING except on X11, where UTF8_STRING is tried
++ before STRING."""
+ if 'displayof' not in kw: kw['displayof'] = self._w
++ if 'type' not in kw and self._windowingsystem == 'x11':
++ try:
++ kw['type'] = 'UTF8_STRING'
++ return self.tk.call(('selection', 'get') + self._options(kw))
++ except TclError:
++ del kw['type']
+ return self.tk.call(('selection', 'get') + self._options(kw))
+ def selection_handle(self, command, **kw):
+ """Specify a function COMMAND to call if the X
+@@ -1037,6 +1052,15 @@
+ if displayof is None:
+ return ('-displayof', self._w)
+ return ()
++ @property
++ def _windowingsystem(self):
++ """Internal function."""
++ try:
++ return self._root()._windowingsystem_cached
++ except AttributeError:
++ ws = self._root()._windowingsystem_cached = \
++ self.tk.call('tk', 'windowingsystem')
++ return ws
+ def _options(self, cnf, kw = None):
+ """Internal function."""
+ if kw:
+diff -r 70274d53c1dd Lib/lib-tk/tkSimpleDialog.py
+--- a/Lib/lib-tk/tkSimpleDialog.py
++++ b/Lib/lib-tk/tkSimpleDialog.py
+@@ -200,7 +200,7 @@
+ self.entry = Entry(master, name="entry")
+ self.entry.grid(row=1, padx=5, sticky=W+E)
+
+- if self.initialvalue:
++ if self.initialvalue is not None:
+ self.entry.insert(0, self.initialvalue)
+ self.entry.select_range(0, END)
+
+diff -r 70274d53c1dd Lib/lib-tk/ttk.py
+--- a/Lib/lib-tk/ttk.py
++++ b/Lib/lib-tk/ttk.py
+@@ -1253,7 +1253,7 @@
+
+
+ def exists(self, item):
+- """Returns True if the specified item is present in the three,
++ """Returns True if the specified item is present in the tree,
+ False otherwise."""
+ return bool(self.tk.call(self._w, "exists", item))
+
+diff -r 70274d53c1dd Lib/locale.py
+--- a/Lib/locale.py
++++ b/Lib/locale.py
+@@ -18,6 +18,14 @@
+ import operator
+ import functools
+
++try:
++ _unicode = unicode
++except NameError:
++ # If Python is built without Unicode support, the unicode type
++ # will not exist. Fake one.
++ class _unicode(object):
++ pass
++
+ # Try importing the _locale module.
+ #
+ # If this fails, fall back on a basic 'C' locale emulation.
+@@ -353,7 +361,7 @@
+
+ """
+ # Normalize the locale name and extract the encoding
+- if isinstance(localename, unicode):
++ if isinstance(localename, _unicode):
+ localename = localename.encode('ascii')
+ fullname = localename.translate(_ascii_lower_map)
+ if ':' in fullname:
+@@ -1581,8 +1589,7 @@
+ # to include every locale up to Windows Vista.
+ #
+ # NOTE: this mapping is incomplete. If your language is missing, please
+-# submit a bug report to Python bug manager, which you can find via:
+-# http://www.python.org/dev/
++# submit a bug report to the Python bug tracker at http://bugs.python.org/
+ # Make sure you include the missing language identifier and the suggested
+ # locale code.
+ #
+diff -r 70274d53c1dd Lib/logging/__init__.py
+--- a/Lib/logging/__init__.py
++++ b/Lib/logging/__init__.py
+@@ -1,4 +1,4 @@
+-# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
++# Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
+ #
+ # Permission to use, copy, modify, and distribute this software and its
+ # documentation for any purpose and without fee is hereby granted,
+@@ -16,9 +16,9 @@
+
+ """
+ Logging package for Python. Based on PEP 282 and comments thereto in
+-comp.lang.python, and influenced by Apache's log4j system.
++comp.lang.python.
+
+-Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
++Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
+
+ To use, simply 'import logging' and log away!
+ """
+@@ -828,8 +828,12 @@
+ """
+ Flushes the stream.
+ """
+- if self.stream and hasattr(self.stream, "flush"):
+- self.stream.flush()
++ self.acquire()
++ try:
++ if self.stream and hasattr(self.stream, "flush"):
++ self.stream.flush()
++ finally:
++ self.release()
+
+ def emit(self, record):
+ """
+@@ -900,12 +904,16 @@
+ """
+ Closes the stream.
+ """
+- if self.stream:
+- self.flush()
+- if hasattr(self.stream, "close"):
+- self.stream.close()
+- StreamHandler.close(self)
+- self.stream = None
++ self.acquire()
++ try:
++ if self.stream:
++ self.flush()
++ if hasattr(self.stream, "close"):
++ self.stream.close()
++ StreamHandler.close(self)
++ self.stream = None
++ finally:
++ self.release()
+
+ def _open(self):
+ """
+@@ -1165,11 +1173,12 @@
+ if self.isEnabledFor(ERROR):
+ self._log(ERROR, msg, args, **kwargs)
+
+- def exception(self, msg, *args):
++ def exception(self, msg, *args, **kwargs):
+ """
+ Convenience method for logging an ERROR with exception information.
+ """
+- self.error(msg, exc_info=1, *args)
++ kwargs['exc_info'] = 1
++ self.error(msg, *args, **kwargs)
+
+ def critical(self, msg, *args, **kwargs):
+ """
+@@ -1574,12 +1583,13 @@
+ basicConfig()
+ root.error(msg, *args, **kwargs)
+
+-def exception(msg, *args):
++def exception(msg, *args, **kwargs):
+ """
+ Log a message with severity 'ERROR' on the root logger,
+ with exception information.
+ """
+- error(msg, exc_info=1, *args)
++ kwargs['exc_info'] = 1
++ error(msg, *args, **kwargs)
+
+ def warning(msg, *args, **kwargs):
+ """
+diff -r 70274d53c1dd Lib/logging/handlers.py
+--- a/Lib/logging/handlers.py
++++ b/Lib/logging/handlers.py
+@@ -1,4 +1,4 @@
+-# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
++# Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
+ #
+ # Permission to use, copy, modify, and distribute this software and its
+ # documentation for any purpose and without fee is hereby granted,
+@@ -16,15 +16,14 @@
+
+ """
+ Additional handlers for the logging package for Python. The core package is
+-based on PEP 282 and comments thereto in comp.lang.python, and influenced by
+-Apache's log4j system.
++based on PEP 282 and comments thereto in comp.lang.python.
+
+-Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
++Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
+
+ To use, simply 'import logging.handlers' and log away!
+ """
+
+-import logging, socket, os, cPickle, struct, time, re
++import errno, logging, socket, os, cPickle, struct, time, re
+ from stat import ST_DEV, ST_INO, ST_MTIME
+
+ try:
+@@ -273,9 +272,10 @@
+ dstAtRollover = time.localtime(newRolloverAt)[-1]
+ if dstNow != dstAtRollover:
+ if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
+- newRolloverAt = newRolloverAt - 3600
++ addend = -3600
+ else: # DST bows out before next rollover, so we need to add an hour
+- newRolloverAt = newRolloverAt + 3600
++ addend = 3600
++ newRolloverAt += addend
+ result = newRolloverAt
+ return result
+
+@@ -327,11 +327,20 @@
+ self.stream.close()
+ self.stream = None
+ # get the time that this sequence started at and make it a TimeTuple
++ currentTime = int(time.time())
++ dstNow = time.localtime(currentTime)[-1]
+ t = self.rolloverAt - self.interval
+ if self.utc:
+ timeTuple = time.gmtime(t)
+ else:
+ timeTuple = time.localtime(t)
++ dstThen = timeTuple[-1]
++ if dstNow != dstThen:
++ if dstNow:
++ addend = 3600
++ else:
++ addend = -3600
++ timeTuple = time.localtime(t + addend)
+ dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
+ if os.path.exists(dfn):
+ os.remove(dfn)
+@@ -347,19 +356,18 @@
+ #print "%s -> %s" % (self.baseFilename, dfn)
+ self.mode = 'w'
+ self.stream = self._open()
+- currentTime = int(time.time())
+ newRolloverAt = self.computeRollover(currentTime)
+ while newRolloverAt <= currentTime:
+ newRolloverAt = newRolloverAt + self.interval
+ #If DST changes and midnight or weekly rollover, adjust for this.
+ if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
+- dstNow = time.localtime(currentTime)[-1]
+ dstAtRollover = time.localtime(newRolloverAt)[-1]
+ if dstNow != dstAtRollover:
+ if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
+- newRolloverAt = newRolloverAt - 3600
++ addend = -3600
+ else: # DST bows out before next rollover, so we need to add an hour
+- newRolloverAt = newRolloverAt + 3600
++ addend = 3600
++ newRolloverAt += addend
+ self.rolloverAt = newRolloverAt
+
+ class WatchedFileHandler(logging.FileHandler):
+@@ -384,11 +392,13 @@
+ """
+ def __init__(self, filename, mode='a', encoding=None, delay=0):
+ logging.FileHandler.__init__(self, filename, mode, encoding, delay)
+- if not os.path.exists(self.baseFilename):
+- self.dev, self.ino = -1, -1
+- else:
+- stat = os.stat(self.baseFilename)
+- self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
++ self.dev, self.ino = -1, -1
++ self._statstream()
++
++ def _statstream(self):
++ if self.stream:
++ sres = os.fstat(self.stream.fileno())
++ self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
+
+ def emit(self, record):
+ """
+@@ -398,19 +408,27 @@
+ has, close the old stream and reopen the file to get the
+ current stream.
+ """
+- if not os.path.exists(self.baseFilename):
+- stat = None
+- changed = 1
+- else:
+- stat = os.stat(self.baseFilename)
+- changed = (stat[ST_DEV] != self.dev) or (stat[ST_INO] != self.ino)
+- if changed and self.stream is not None:
+- self.stream.flush()
+- self.stream.close()
+- self.stream = self._open()
+- if stat is None:
+- stat = os.stat(self.baseFilename)
+- self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
++ # Reduce the chance of race conditions by stat'ing by path only
++ # once and then fstat'ing our new fd if we opened a new log stream.
++ # See issue #14632: Thanks to John Mulligan for the problem report
++ # and patch.
++ try:
++ # stat the file by path, checking for existence
++ sres = os.stat(self.baseFilename)
++ except OSError as err:
++ if err.errno == errno.ENOENT:
++ sres = None
++ else:
++ raise
++ # compare file system stat with that of our stream file handle
++ if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
++ if self.stream is not None:
++ # we have an open file handle, clean it up
++ self.stream.flush()
++ self.stream.close()
++ # open a new file handle and get new stat info from that fd
++ self.stream = self._open()
++ self._statstream()
+ logging.FileHandler.emit(self, record)
+
+ class SocketHandler(logging.Handler):
+@@ -520,9 +538,16 @@
+ """
+ ei = record.exc_info
+ if ei:
+- dummy = self.format(record) # just to get traceback text into record.exc_text
++ # just to get traceback text into record.exc_text ...
++ dummy = self.format(record)
+ record.exc_info = None # to avoid Unpickleable error
+- s = cPickle.dumps(record.__dict__, 1)
++ # See issue #14436: If msg or args are objects, they may not be
++ # available on the receiving end. So we convert the msg % args
++ # to a string, save it as msg and zap the args.
++ d = dict(record.__dict__)
++ d['msg'] = record.getMessage()
++ d['args'] = None
++ s = cPickle.dumps(d, 1)
+ if ei:
+ record.exc_info = ei # for next handler
+ slen = struct.pack(">L", len(s))
+@@ -563,9 +588,13 @@
+ """
+ Closes the socket.
+ """
+- if self.sock:
+- self.sock.close()
+- self.sock = None
++ self.acquire()
++ try:
++ if self.sock:
++ self.sock.close()
++ self.sock = None
++ finally:
++ self.release()
+ logging.Handler.close(self)
+
+ class DatagramHandler(SocketHandler):
+@@ -742,7 +771,11 @@
+ except socket.error:
+ self.socket.close()
+ self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+- self.socket.connect(address)
++ try:
++ self.socket.connect(address)
++ except socket.error:
++ self.socket.close()
++ raise
+
+ # curious: when talking to the unix-domain '/dev/log' socket, a
+ # zero-terminator seems to be required. this string is placed
+@@ -767,8 +800,12 @@
+ """
+ Closes the socket.
+ """
+- if self.unixsocket:
+- self.socket.close()
++ self.acquire()
++ try:
++ if self.unixsocket:
++ self.socket.close()
++ finally:
++ self.release()
+ logging.Handler.close(self)
+
+ def mapPriority(self, levelName):
+@@ -798,8 +835,6 @@
+ # Message is a string. Convert to bytes as required by RFC 5424
+ if type(msg) is unicode:
+ msg = msg.encode('utf-8')
+- if codecs:
+- msg = codecs.BOM_UTF8 + msg
+ msg = prio + msg
+ try:
+ if self.unixsocket:
+@@ -852,6 +887,7 @@
+ self.toaddrs = toaddrs
+ self.subject = subject
+ self.secure = secure
++ self._timeout = 5.0
+
+ def getSubject(self, record):
+ """
+@@ -874,7 +910,7 @@
+ port = self.mailport
+ if not port:
+ port = smtplib.SMTP_PORT
+- smtp = smtplib.SMTP(self.mailhost, port)
++ smtp = smtplib.SMTP(self.mailhost, port, timeout=self._timeout)
+ msg = self.format(record)
+ msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
+ self.fromaddr,
+@@ -1096,7 +1132,11 @@
+
+ This version just zaps the buffer to empty.
+ """
+- self.buffer = []
++ self.acquire()
++ try:
++ self.buffer = []
++ finally:
++ self.release()
+
+ def close(self):
+ """
+@@ -1144,15 +1184,23 @@
+ records to the target, if there is one. Override if you want
+ different behaviour.
+ """
+- if self.target:
+- for record in self.buffer:
+- self.target.handle(record)
+- self.buffer = []
++ self.acquire()
++ try:
++ if self.target:
++ for record in self.buffer:
++ self.target.handle(record)
++ self.buffer = []
++ finally:
++ self.release()
+
+ def close(self):
+ """
+ Flush, set the target to None and lose the buffer.
+ """
+ self.flush()
+- self.target = None
+- BufferingHandler.close(self)
++ self.acquire()
++ try:
++ self.target = None
++ BufferingHandler.close(self)
++ finally:
++ self.release()
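
The WatchedFileHandler rework reduces to one technique: fstat() the stream once when
it is opened and remember its (st_dev, st_ino); on every emit, a single os.stat() of
the path is compared against that pair, and a mismatch or ENOENT means the file was
rotated or removed and the stream must be reopened. A minimal sketch of just the check
(the helper name is illustrative):

    import errno
    import os
    from stat import ST_DEV, ST_INO

    def file_was_replaced(path, dev, ino):
        """True if `path` no longer names the open file identified by (dev, ino)."""
        try:
            sres = os.stat(path)
        except OSError as err:
            if err.errno == errno.ENOENT:
                return True              # the log file was removed
            raise
        return sres[ST_DEV] != dev or sres[ST_INO] != ino
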
+diff -r 70274d53c1dd Lib/mailbox.py
+--- a/Lib/mailbox.py
++++ b/Lib/mailbox.py
+@@ -561,16 +561,19 @@
+ self._file = f
+ self._toc = None
+ self._next_key = 0
+- self._pending = False # No changes require rewriting the file.
++ self._pending = False # No changes require rewriting the file.
++ self._pending_sync = False # No need to sync the file
+ self._locked = False
+- self._file_length = None # Used to record mailbox size
++ self._file_length = None # Used to record mailbox size
+
+ def add(self, message):
+ """Add message and return assigned key."""
+ self._lookup()
+ self._toc[self._next_key] = self._append_message(message)
+ self._next_key += 1
+- self._pending = True
++ # _append_message appends the message to the mailbox file. We
++ # don't need a full rewrite + rename, sync is enough.
++ self._pending_sync = True
+ return self._next_key - 1
+
+ def remove(self, key):
+@@ -616,6 +619,11 @@
+ def flush(self):
+ """Write any pending changes to disk."""
+ if not self._pending:
++ if self._pending_sync:
++ # Messages have only been added, so syncing the file
++ # is enough.
++ _sync_flush(self._file)
++ self._pending_sync = False
+ return
+
+ # In order to be writing anything out at all, self._toc must
+@@ -649,6 +657,7 @@
+ new_file.write(buffer)
+ new_toc[key] = (new_start, new_file.tell())
+ self._post_message_hook(new_file)
++ self._file_length = new_file.tell()
+ except:
+ new_file.close()
+ os.remove(new_file.name)
+@@ -656,6 +665,9 @@
+ _sync_close(new_file)
+ # self._file is about to get replaced, so no need to sync.
+ self._file.close()
++ # Make sure the new file's mode is the same as the old file's
++ mode = os.stat(self._path).st_mode
++ os.chmod(new_file.name, mode)
+ try:
+ os.rename(new_file.name, self._path)
+ except OSError, e:
+@@ -668,6 +680,7 @@
+ self._file = open(self._path, 'rb+')
+ self._toc = new_toc
+ self._pending = False
++ self._pending_sync = False
+ if self._locked:
+ _lock_file(self._file, dotlock=False)
+
+@@ -704,6 +717,12 @@
+ """Append message to mailbox and return (start, stop) offsets."""
+ self._file.seek(0, 2)
+ before = self._file.tell()
++ if len(self._toc) == 0 and not self._pending:
++ # This is the first message, and the _pre_mailbox_hook
++ # hasn't yet been called. If self._pending is True,
++ # messages have been removed, so _pre_mailbox_hook must
++ # have been called already.
++ self._pre_mailbox_hook(self._file)
+ try:
+ self._pre_message_hook(self._file)
+ offsets = self._install_message(message)
+@@ -1367,9 +1386,9 @@
+ line = message.readline()
+ self._file.write(line.replace('\n', os.linesep))
+ if line == '\n' or line == '':
+- self._file.write('*** EOOH ***' + os.linesep)
+ if first_pass:
+ first_pass = False
++ self._file.write('*** EOOH ***' + os.linesep)
+ message.seek(original_pos)
+ else:
+ break
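
The new _pending_sync flag lets add() skip the full rewrite-and-rename that flush()
otherwise performs: when messages were only appended, flushing the existing file to
disk is enough. mailbox's _sync_flush() helper (not shown in this hunk) is roughly:

    import os

    def _sync_flush(f):
        """Flush Python's buffer for f, then fsync it to disk where possible."""
        f.flush()
        if hasattr(os, 'fsync'):
            os.fsync(f.fileno())
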
+diff -r 70274d53c1dd Lib/mimetypes.py
+--- a/Lib/mimetypes.py
++++ b/Lib/mimetypes.py
+@@ -432,6 +432,7 @@
+ '.hdf' : 'application/x-hdf',
+ '.htm' : 'text/html',
+ '.html' : 'text/html',
++ '.ico' : 'image/vnd.microsoft.icon',
+ '.ief' : 'image/ief',
+ '.jpe' : 'image/jpeg',
+ '.jpeg' : 'image/jpeg',
+diff -r 70274d53c1dd Lib/multiprocessing/connection.py
+--- a/Lib/multiprocessing/connection.py
++++ b/Lib/multiprocessing/connection.py
+@@ -186,6 +186,8 @@
+ '''
+ if duplex:
+ s1, s2 = socket.socketpair()
++ s1.setblocking(True)
++ s2.setblocking(True)
+ c1 = _multiprocessing.Connection(os.dup(s1.fileno()))
+ c2 = _multiprocessing.Connection(os.dup(s2.fileno()))
+ s1.close()
+@@ -251,6 +253,7 @@
+ self._socket = socket.socket(getattr(socket, family))
+ try:
+ self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
++ self._socket.setblocking(True)
+ self._socket.bind(address)
+ self._socket.listen(backlog)
+ self._address = self._socket.getsockname()
+@@ -269,6 +272,7 @@
+
+ def accept(self):
+ s, self._last_accepted = self._socket.accept()
++ s.setblocking(True)
+ fd = duplicate(s.fileno())
+ conn = _multiprocessing.Connection(fd)
+ s.close()
+@@ -286,6 +290,7 @@
+ '''
+ family = address_type(address)
+ s = socket.socket( getattr(socket, family) )
++ s.setblocking(True)
+ t = _init_timeout()
+
+ while 1:
+@@ -348,7 +353,10 @@
+ try:
+ win32.ConnectNamedPipe(handle, win32.NULL)
+ except WindowsError, e:
+- if e.args[0] != win32.ERROR_PIPE_CONNECTED:
++ # ERROR_NO_DATA can occur if a client has already connected,
++ # written data and then disconnected -- see Issue 14725.
++ if e.args[0] not in (win32.ERROR_PIPE_CONNECTED,
++ win32.ERROR_NO_DATA):
+ raise
+ return _multiprocessing.PipeConnection(handle)
+
+diff -r 70274d53c1dd Lib/multiprocessing/dummy/__init__.py
+--- a/Lib/multiprocessing/dummy/__init__.py
++++ b/Lib/multiprocessing/dummy/__init__.py
+@@ -70,7 +70,8 @@
+ def start(self):
+ assert self._parent is current_process()
+ self._start_called = True
+- self._parent._children[self] = None
++ if hasattr(self._parent, '_children'):
++ self._parent._children[self] = None
+ threading.Thread.start(self)
+
+ @property
+diff -r 70274d53c1dd Lib/multiprocessing/forking.py
+--- a/Lib/multiprocessing/forking.py
++++ b/Lib/multiprocessing/forking.py
+@@ -336,7 +336,7 @@
+ '''
+ Returns prefix of command line used for spawning a child process
+ '''
+- if process.current_process()._identity==() and is_forking(sys.argv):
++ if getattr(process.current_process(), '_inheriting', False):
+ raise RuntimeError('''
+ Attempt to start a new process before the current process
+ has finished its bootstrapping phase.
+diff -r 70274d53c1dd Lib/multiprocessing/pool.py
+--- a/Lib/multiprocessing/pool.py
++++ b/Lib/multiprocessing/pool.py
+@@ -68,6 +68,23 @@
+ # Code run by worker processes
+ #
+
++class MaybeEncodingError(Exception):
++ """Wraps possible unpickleable errors, so they can be
++ safely sent through the socket."""
++
++ def __init__(self, exc, value):
++ self.exc = repr(exc)
++ self.value = repr(value)
++ super(MaybeEncodingError, self).__init__(self.exc, self.value)
++
++ def __str__(self):
++ return "Error sending result: '%s'. Reason: '%s'" % (self.value,
++ self.exc)
++
++ def __repr__(self):
++ return "<MaybeEncodingError: %s>" % str(self)
++
++
+ def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
+ assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
+ put = outqueue.put
+@@ -96,7 +113,13 @@
+ result = (True, func(*args, **kwds))
+ except Exception, e:
+ result = (False, e)
+- put((job, i, result))
++ try:
++ put((job, i, result))
++ except Exception as e:
++ wrapped = MaybeEncodingError(e, result[1])
++ debug("Possible encoding error while sending result: %s" % (
++ wrapped))
++ put((job, i, (False, wrapped)))
+ completed += 1
+ debug('worker exiting after %d tasks' % completed)
+
+@@ -466,7 +489,8 @@
+ # We must wait for the worker handler to exit before terminating
+ # workers because we don't want workers to be restarted behind our back.
+ debug('joining worker handler')
+- worker_handler.join()
++ if threading.current_thread() is not worker_handler:
++ worker_handler.join(1e100)
+
+ # Terminate workers which haven't already finished.
+ if pool and hasattr(pool[0], 'terminate'):
+@@ -476,10 +500,12 @@
+ p.terminate()
+
+ debug('joining task handler')
+- task_handler.join(1e100)
++ if threading.current_thread() is not task_handler:
++ task_handler.join(1e100)
+
+ debug('joining result handler')
+- result_handler.join(1e100)
++ if threading.current_thread() is not result_handler:
++ result_handler.join(1e100)
+
+ if pool and hasattr(pool[0], 'terminate'):
+ debug('joining pool workers')
+@@ -553,6 +579,7 @@
+ if chunksize <= 0:
+ self._number_left = 0
+ self._ready = True
++ del cache[self._job]
+ else:
+ self._number_left = length//chunksize + bool(length % chunksize)
+
+diff -r 70274d53c1dd Lib/multiprocessing/process.py
+--- a/Lib/multiprocessing/process.py
++++ b/Lib/multiprocessing/process.py
+@@ -262,12 +262,12 @@
+ except SystemExit, e:
+ if not e.args:
+ exitcode = 1
+- elif type(e.args[0]) is int:
++ elif isinstance(e.args[0], int):
+ exitcode = e.args[0]
+ else:
+- sys.stderr.write(e.args[0] + '\n')
++ sys.stderr.write(str(e.args[0]) + '\n')
+ sys.stderr.flush()
+- exitcode = 1
++ exitcode = 0 if isinstance(e.args[0], str) else 1
+ except:
+ exitcode = 1
+ import traceback
+diff -r 70274d53c1dd Lib/numbers.py
+--- a/Lib/numbers.py
++++ b/Lib/numbers.py
+@@ -303,7 +303,7 @@
+ raise NotImplementedError
+
+ def __index__(self):
+- """index(self)"""
++ """Called whenever an index is needed, such as in slicing"""
+ return long(self)
+
+ @abstractmethod
+diff -r 70274d53c1dd Lib/posixpath.py
+--- a/Lib/posixpath.py
++++ b/Lib/posixpath.py
+@@ -17,6 +17,14 @@
+ import warnings
+ from genericpath import *
+
++try:
++ _unicode = unicode
++except NameError:
++ # If Python is built without Unicode support, the unicode type
++ # will not exist. Fake one.
++ class _unicode(object):
++ pass
++
+ __all__ = ["normcase","isabs","join","splitdrive","split","splitext",
+ "basename","dirname","commonprefix","getsize","getmtime",
+ "getatime","getctime","islink","exists","lexists","isdir","isfile",
+@@ -60,7 +68,8 @@
+ def join(a, *p):
+ """Join two or more pathname components, inserting '/' as needed.
+ If any component is an absolute path, all previous path components
+- will be discarded."""
++ will be discarded. An empty last part will result in a path that
++ ends with a separator."""
+ path = a
+ for b in p:
+ if b.startswith('/'):
+@@ -267,8 +276,8 @@
+ except KeyError:
+ return path
+ userhome = pwent.pw_dir
+- userhome = userhome.rstrip('/') or userhome
+- return userhome + path[i:]
++ userhome = userhome.rstrip('/')
++ return (userhome + path[i:]) or '/'
+
+
+ # Expand paths containing shell variable substitutions.
+@@ -312,7 +321,7 @@
+ def normpath(path):
+ """Normalize path, eliminating double slashes, etc."""
+ # Preserve unicode (if path is unicode)
+- slash, dot = (u'/', u'.') if isinstance(path, unicode) else ('/', '.')
++ slash, dot = (u'/', u'.') if isinstance(path, _unicode) else ('/', '.')
+ if path == '':
+ return dot
+ initial_slashes = path.startswith('/')
+@@ -341,7 +350,7 @@
+ def abspath(path):
+ """Return an absolute path."""
+ if not isabs(path):
+- if isinstance(path, unicode):
++ if isinstance(path, _unicode):
+ cwd = os.getcwdu()
+ else:
+ cwd = os.getcwd()
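
The expanduser() change matters when the home directory is '/' itself: the old code
put the stripped slash back before joining, which produced a double slash, while the
new code strips unconditionally and falls back to '/' only when the result would
otherwise be empty. Worked through by hand:

    # Home directory '/', expanding '~/foo' and bare '~':
    userhome, rest = '/', '/foo'
    old = (userhome.rstrip('/') or userhome) + rest   # -> '//foo' (double slash)
    new = (userhome.rstrip('/') + rest) or '/'        # -> '/foo'
    bare = (userhome.rstrip('/') + '') or '/'         # bare '~' -> '/'
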
+diff -r 70274d53c1dd Lib/pyclbr.py
+--- a/Lib/pyclbr.py
++++ b/Lib/pyclbr.py
+@@ -128,6 +128,8 @@
+ parent = _readmodule(package, path, inpackage)
+ if inpackage is not None:
+ package = "%s.%s" % (inpackage, package)
++ if not '__path__' in parent:
++ raise ImportError('No package named {}'.format(package))
+ return _readmodule(submodule, parent['__path__'], package)
+
+ # Search the path for the module
+diff -r 70274d53c1dd Lib/pydoc.py
+--- a/Lib/pydoc.py
++++ b/Lib/pydoc.py
+@@ -1498,7 +1498,8 @@
+ raise ImportError, 'no Python documentation found for %r' % thing
+ return object, thing
+ else:
+- return thing, getattr(thing, '__name__', None)
++ name = getattr(thing, '__name__', None)
++ return thing, name if isinstance(name, str) else None
+
+ def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
+ """Render text documentation, given an object or a path to an object."""
+@@ -1799,7 +1800,7 @@
+ Welcome to Python %s! This is the online help utility.
+
+ If this is your first time using Python, you should definitely check out
+-the tutorial on the Internet at http://docs.python.org/tutorial/.
++the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
+
+ Enter the name of any module, keyword, or topic to get help on writing
+ Python programs and using Python modules. To quit this help utility and
+@@ -1809,7 +1810,7 @@
+ "keywords", or "topics". Each module also comes with a one-line summary
+ of what it does; to list the modules whose summaries contain a given word
+ such as "spam", type "modules spam".
+-''' % sys.version[:3])
++''' % tuple([sys.version[:3]]*2))
+
+ def list(self, items, columns=4, width=80):
+ items = items[:]
+diff -r 70274d53c1dd Lib/rlcompleter.py
+--- a/Lib/rlcompleter.py
++++ b/Lib/rlcompleter.py
+@@ -1,13 +1,11 @@
+-"""Word completion for GNU readline 2.0.
++"""Word completion for GNU readline.
+
+-This requires the latest extension to the readline module. The completer
+-completes keywords, built-ins and globals in a selectable namespace (which
+-defaults to __main__); when completing NAME.NAME..., it evaluates (!) the
+-expression up to the last dot and completes its attributes.
++The completer completes keywords, built-ins and globals in a selectable
++namespace (which defaults to __main__); when completing NAME.NAME..., it
++evaluates (!) the expression up to the last dot and completes its attributes.
+
+-It's very cool to do "import sys" type "sys.", hit the
+-completion key (twice), and see the list of names defined by the
+-sys module!
++It's very cool to do "import sys" type "sys.", hit the completion key (twice),
++and see the list of names defined by the sys module!
+
+ Tip: to use the tab key as the completion key, call
+
+@@ -15,18 +13,16 @@
+
+ Notes:
+
+-- Exceptions raised by the completer function are *ignored* (and
+-generally cause the completion to fail). This is a feature -- since
+-readline sets the tty device in raw (or cbreak) mode, printing a
+-traceback wouldn't work well without some complicated hoopla to save,
+-reset and restore the tty state.
++- Exceptions raised by the completer function are *ignored* (and generally cause
++ the completion to fail). This is a feature -- since readline sets the tty
++ device in raw (or cbreak) mode, printing a traceback wouldn't work well
++ without some complicated hoopla to save, reset and restore the tty state.
+
+-- The evaluation of the NAME.NAME... form may cause arbitrary
+-application defined code to be executed if an object with a
+-__getattr__ hook is found. Since it is the responsibility of the
+-application (or the user) to enable this feature, I consider this an
+-acceptable risk. More complicated expressions (e.g. function calls or
+-indexing operations) are *not* evaluated.
++- The evaluation of the NAME.NAME... form may cause arbitrary application
++ defined code to be executed if an object with a __getattr__ hook is found.
++ Since it is the responsibility of the application (or the user) to enable this
++ feature, I consider this an acceptable risk. More complicated expressions
++ (e.g. function calls or indexing operations) are *not* evaluated.
+
+ - GNU readline is also used by the built-in functions input() and
+ raw_input(), and thus these also benefit/suffer from the completer
+@@ -35,7 +31,7 @@
+ its input.
+
+ - When the original stdin is not a tty device, GNU readline is never
+-used, and this module (and the readline module) are silently inactive.
++ used, and this module (and the readline module) are silently inactive.
+
+ """
+
+diff -r 70274d53c1dd Lib/shutil.py
+--- a/Lib/shutil.py
++++ b/Lib/shutil.py
+@@ -102,8 +102,10 @@
+ try:
+ os.chflags(dst, st.st_flags)
+ except OSError, why:
+- if (not hasattr(errno, 'EOPNOTSUPP') or
+- why.errno != errno.EOPNOTSUPP):
++ for err in 'EOPNOTSUPP', 'ENOTSUP':
++ if hasattr(errno, err) and why.errno == getattr(errno, err):
++ break
++ else:
+ raise
+
+ def copy(src, dst):
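
The copystat() change leans on Python's for/else: the else branch runs only when the
loop completes without a break, so the original exception is re-raised unless the
errno matches one of the "not supported" codes, not all of which exist on every
platform. The same idiom as a small stand-alone helper (illustrative, not shutil API):

    import errno
    import os

    def chflags_if_supported(path, flags):
        """Apply os.chflags() where it exists; swallow only 'not supported' errors."""
        try:
            os.chflags(path, flags)
        except AttributeError:
            return                        # platform has no chflags at all
        except OSError as why:
            for name in ('EOPNOTSUPP', 'ENOTSUP'):
                if hasattr(errno, name) and why.errno == getattr(errno, name):
                    break                 # matched a tolerated errno -> ignore
            else:
                raise                     # anything else still propagates
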
+diff -r 70274d53c1dd Lib/subprocess.py
+--- a/Lib/subprocess.py
++++ b/Lib/subprocess.py
+@@ -1016,7 +1016,17 @@
+ def terminate(self):
+ """Terminates the process
+ """
+- _subprocess.TerminateProcess(self._handle, 1)
++ try:
++ _subprocess.TerminateProcess(self._handle, 1)
++ except OSError as e:
++ # ERROR_ACCESS_DENIED (winerror 5) is received when the
++ # process already died.
++ if e.winerror != 5:
++ raise
++ rc = _subprocess.GetExitCodeProcess(self._handle)
++ if rc == _subprocess.STILL_ACTIVE:
++ raise
++ self.returncode = rc
+
+ kill = terminate
+
+diff -r 70274d53c1dd Lib/symbol.py
+--- a/Lib/symbol.py
++++ b/Lib/symbol.py
+@@ -7,7 +7,7 @@
+ # To update the symbols in this file, 'cd' to the top directory of
+ # the python source tree after building the interpreter and run:
+ #
+-# python Lib/symbol.py
++# ./python Lib/symbol.py
+
+ #--start constants--
+ single_input = 256
+diff -r 70274d53c1dd Lib/tarfile.py
+--- a/Lib/tarfile.py
++++ b/Lib/tarfile.py
+@@ -2397,7 +2397,7 @@
+ """
+ if tarinfo.issym():
+ # Always search the entire archive.
+- linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
++ linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname)))
+ limit = None
+ else:
+ # Search the archive before the link, because a hard link is
+diff -r 70274d53c1dd Lib/telnetlib.py
+--- a/Lib/telnetlib.py
++++ b/Lib/telnetlib.py
+@@ -34,6 +34,7 @@
+
+
+ # Imported modules
++import errno
+ import sys
+ import socket
+ import select
+@@ -205,6 +206,7 @@
+ self.sb = 0 # flag for SB and SE sequence.
+ self.sbdataq = ''
+ self.option_callback = None
++ self._has_poll = hasattr(select, 'poll')
+ if host is not None:
+ self.open(host, port, timeout)
+
+@@ -287,6 +289,61 @@
+ is closed and no cooked data is available.
+
+ """
++ if self._has_poll:
++ return self._read_until_with_poll(match, timeout)
++ else:
++ return self._read_until_with_select(match, timeout)
++
++ def _read_until_with_poll(self, match, timeout):
++ """Read until a given string is encountered or until timeout.
++
++ This method uses select.poll() to implement the timeout.
++ """
++ n = len(match)
++ call_timeout = timeout
++ if timeout is not None:
++ from time import time
++ time_start = time()
++ self.process_rawq()
++ i = self.cookedq.find(match)
++ if i < 0:
++ poller = select.poll()
++ poll_in_or_priority_flags = select.POLLIN | select.POLLPRI
++ poller.register(self, poll_in_or_priority_flags)
++ while i < 0 and not self.eof:
++ try:
++ ready = poller.poll(call_timeout)
++ except select.error as e:
++ if e.errno == errno.EINTR:
++ if timeout is not None:
++ elapsed = time() - time_start
++ call_timeout = timeout-elapsed
++ continue
++ raise
++ for fd, mode in ready:
++ if mode & poll_in_or_priority_flags:
++ i = max(0, len(self.cookedq)-n)
++ self.fill_rawq()
++ self.process_rawq()
++ i = self.cookedq.find(match, i)
++ if timeout is not None:
++ elapsed = time() - time_start
++ if elapsed >= timeout:
++ break
++ call_timeout = timeout-elapsed
++ poller.unregister(self)
++ if i >= 0:
++ i = i + n
++ buf = self.cookedq[:i]
++ self.cookedq = self.cookedq[i:]
++ return buf
++ return self.read_very_lazy()
++
++ def _read_until_with_select(self, match, timeout=None):
++ """Read until a given string is encountered or until timeout.
++
++ The timeout is implemented using select.select().
++ """
+ n = len(match)
+ self.process_rawq()
+ i = self.cookedq.find(match)
+@@ -589,6 +646,79 @@
+ results are undeterministic, and may depend on the I/O timing.
+
+ """
++ if self._has_poll:
++ return self._expect_with_poll(list, timeout)
++ else:
++ return self._expect_with_select(list, timeout)
++
++ def _expect_with_poll(self, expect_list, timeout=None):
++ """Read until one from a list of a regular expressions matches.
++
++ This method uses select.poll() to implement the timeout.
++ """
++ re = None
++ expect_list = expect_list[:]
++ indices = range(len(expect_list))
++ for i in indices:
++ if not hasattr(expect_list[i], "search"):
++ if not re: import re
++ expect_list[i] = re.compile(expect_list[i])
++ call_timeout = timeout
++ if timeout is not None:
++ from time import time
++ time_start = time()
++ self.process_rawq()
++ m = None
++ for i in indices:
++ m = expect_list[i].search(self.cookedq)
++ if m:
++ e = m.end()
++ text = self.cookedq[:e]
++ self.cookedq = self.cookedq[e:]
++ break
++ if not m:
++ poller = select.poll()
++ poll_in_or_priority_flags = select.POLLIN | select.POLLPRI
++ poller.register(self, poll_in_or_priority_flags)
++ while not m and not self.eof:
++ try:
++ ready = poller.poll(call_timeout)
++ except select.error as e:
++ if e.errno == errno.EINTR:
++ if timeout is not None:
++ elapsed = time() - time_start
++ call_timeout = timeout-elapsed
++ continue
++ raise
++ for fd, mode in ready:
++ if mode & poll_in_or_priority_flags:
++ self.fill_rawq()
++ self.process_rawq()
++ for i in indices:
++ m = expect_list[i].search(self.cookedq)
++ if m:
++ e = m.end()
++ text = self.cookedq[:e]
++ self.cookedq = self.cookedq[e:]
++ break
++ if timeout is not None:
++ elapsed = time() - time_start
++ if elapsed >= timeout:
++ break
++ call_timeout = timeout-elapsed
++ poller.unregister(self)
++ if m:
++ return (i, m, text)
++ text = self.read_very_lazy()
++ if not text and self.eof:
++ raise EOFError
++ return (-1, None, text)
++
++ def _expect_with_select(self, list, timeout=None):
++ """Read until one from a list of a regular expressions matches.
++
++ The timeout is implemented using select.select().
++ """
+ re = None
+ list = list[:]
+ indices = range(len(list))
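
Both _*_with_poll() helpers above follow the same loop: register the socket for
POLLIN|POLLPRI, and whenever poll() is interrupted by a signal (EINTR) or returns
without a match, recompute the time left from the original start so the caller-visible
timeout still holds. A condensed stand-alone version of that loop (not telnetlib API;
note that poll() takes milliseconds):

    import errno
    import select
    import time

    def wait_readable(sock, timeout=None):
        """Wait for sock to become readable; return False if the deadline passes."""
        poller = select.poll()
        poller.register(sock, select.POLLIN | select.POLLPRI)
        deadline = None if timeout is None else time.time() + timeout
        while True:
            if deadline is None:
                ms = None
            else:
                remaining = deadline - time.time()
                if remaining <= 0:
                    return False
                ms = int(remaining * 1000)
            try:
                events = poller.poll(ms)
            except select.error as e:
                if e.args[0] != errno.EINTR:
                    raise
                continue                  # interrupted: retry with less time
            if events:
                return True
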
+diff -r 70274d53c1dd Lib/test/crashers/buffer_mutate.py
+--- /dev/null
++++ b/Lib/test/crashers/buffer_mutate.py
+@@ -0,0 +1,30 @@
++#
++# The various methods of bufferobject.c (here buffer_subscript()) call
++# get_buf() before calling potentially more Python code (here via
++# PySlice_GetIndicesEx()). But get_buf() already returned a void*
++# pointer. This void* pointer can become invalid if the object
++# underlying the buffer is mutated (here a bytearray object).
++#
++# As usual, please keep in mind that the three "here" in the sentence
++# above are only examples. Each can be changed easily and lead to
++# another crasher.
++#
++# This crashes for me on Linux 32-bits with CPython 2.6 and 2.7
++# with a segmentation fault.
++#
++
++
++class PseudoIndex(object):
++ def __index__(self):
++ for c in "foobar"*n:
++ a.append(c)
++ return n * 4
++
++
++for n in range(1, 100000, 100):
++ a = bytearray("test"*n)
++ buf = buffer(a)
++
++ s = buf[:PseudoIndex():1]
++ #print repr(s)
++ #assert s == "test"*n
+diff -r 70274d53c1dd Lib/test/crashers/decref_before_assignment.py
+--- /dev/null
++++ b/Lib/test/crashers/decref_before_assignment.py
+@@ -0,0 +1,44 @@
++"""
++General example for an attack against code like this:
++
++ Py_DECREF(obj->attr); obj->attr = ...;
++
++here in Module/_json.c:scanner_init().
++
++Explanation: if the first Py_DECREF() triggers either a __del__ or a
++weakref callback, that callback runs while 'obj->attr' still appears
++to hold the old reference to the object, even though the reference
++count has already been dropped.
++
++Status: progress has been made replacing these cases, but there is an
++infinite number of such cases.
++"""
++
++import _json, weakref
++
++class Ctx1(object):
++ encoding = "utf8"
++ strict = None
++ object_hook = None
++ object_pairs_hook = None
++ parse_float = None
++ parse_int = None
++ parse_constant = None
++
++class Foo(unicode):
++ pass
++
++def delete_me(*args):
++ print scanner.encoding.__dict__
++
++class Ctx2(Ctx1):
++ @property
++ def encoding(self):
++ global wref
++ f = Foo("utf8")
++ f.abc = globals()
++ wref = weakref.ref(f, delete_me)
++ return f
++
++scanner = _json.make_scanner(Ctx1())
++scanner.__init__(Ctx2())
+diff -r 70274d53c1dd Lib/test/mp_fork_bomb.py
+--- /dev/null
++++ b/Lib/test/mp_fork_bomb.py
+@@ -0,0 +1,16 @@
++import multiprocessing
++
++def foo(conn):
++ conn.send("123")
++
++# Because "if __name__ == '__main__'" is missing this will not work
++# correctly on Windows. However, we should get a RuntimeError rather
++# than the Windows equivalent of a fork bomb.
++
++r, w = multiprocessing.Pipe(False)
++p = multiprocessing.Process(target=foo, args=(w,))
++p.start()
++w.close()
++print(r.recv())
++r.close()
++p.join()
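
For contrast with the deliberately unguarded test script above, the Windows-safe form
of the same program protects the spawning code, because on Windows each child
re-imports the main module under a different __name__:

    import multiprocessing

    def foo(conn):
        conn.send("123")

    if __name__ == '__main__':
        r, w = multiprocessing.Pipe(False)
        p = multiprocessing.Process(target=foo, args=(w,))
        p.start()
        w.close()
        print(r.recv())
        r.close()
        p.join()
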
+diff -r 70274d53c1dd Lib/test/regrtest.py
+--- a/Lib/test/regrtest.py
++++ b/Lib/test/regrtest.py
+@@ -540,6 +540,8 @@
+ print stdout
+ if stderr:
+ print >>sys.stderr, stderr
++ sys.stdout.flush()
++ sys.stderr.flush()
+ if result[0] == INTERRUPTED:
+ assert result[1] == 'KeyboardInterrupt'
+ raise KeyboardInterrupt # What else?
+@@ -758,7 +760,9 @@
+ # the corresponding method names.
+
+ resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
+- 'os.environ', 'sys.path', 'asyncore.socket_map')
++ 'os.environ', 'sys.path', 'asyncore.socket_map',
++ 'test_support.TESTFN',
++ )
+
+ def get_sys_argv(self):
+ return id(sys.argv), sys.argv, sys.argv[:]
+@@ -809,6 +813,21 @@
+ asyncore.close_all(ignore_all=True)
+ asyncore.socket_map.update(saved_map)
+
++ def get_test_support_TESTFN(self):
++ if os.path.isfile(test_support.TESTFN):
++ result = 'f'
++ elif os.path.isdir(test_support.TESTFN):
++ result = 'd'
++ else:
++ result = None
++ return result
++ def restore_test_support_TESTFN(self, saved_value):
++ if saved_value is None:
++ if os.path.isfile(test_support.TESTFN):
++ os.unlink(test_support.TESTFN)
++ elif os.path.isdir(test_support.TESTFN):
++ shutil.rmtree(test_support.TESTFN)
++
+ def resource_info(self):
+ for name in self.resources:
+ method_suffix = name.replace('.', '_')
+diff -r 70274d53c1dd Lib/test/script_helper.py
+--- a/Lib/test/script_helper.py
++++ b/Lib/test/script_helper.py
+@@ -10,7 +10,13 @@
+ import py_compile
+ import contextlib
+ import shutil
+-import zipfile
++try:
++ import zipfile
++except ImportError:
++ # If Python is built without Unicode support, importing _io will
++ # fail, which, in turn, means that zipfile cannot be imported.
++ # Most of this module can then still be used.
++ pass
+
+ from test.test_support import strip_python_stderr
+
+diff -r 70274d53c1dd Lib/test/sha256.pem
+--- a/Lib/test/sha256.pem
++++ b/Lib/test/sha256.pem
+@@ -1,129 +1,128 @@
+ # Certificate chain for https://sha256.tbs-internet.com
+- 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=sha-256 production/CN=sha256.tbs-internet.com
+- i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC
++ 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=Certificats TBS X509/CN=ecom.tbs-x509.com
++ i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business
+ -----BEGIN CERTIFICATE-----
+-MIIGXTCCBUWgAwIBAgIRAMmag+ygSAdxZsbyzYjhuW0wDQYJKoZIhvcNAQELBQAw
+-gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl
++MIIGTjCCBTagAwIBAgIQOh3d9dNDPq1cSdJmEiMpqDANBgkqhkiG9w0BAQUFADCB
++yTELMAkGA1UEBhMCRlIxETAPBgNVBAgTCENhbHZhZG9zMQ0wCwYDVQQHEwRDYWVu
++MRUwEwYDVQQKEwxUQlMgSU5URVJORVQxSDBGBgNVBAsTP1Rlcm1zIGFuZCBDb25k
++aXRpb25zOiBodHRwOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0EvcmVwb3NpdG9y
++eTEYMBYGA1UECxMPVEJTIElOVEVSTkVUIENBMR0wGwYDVQQDExRUQlMgWDUwOSBD
++QSBidXNpbmVzczAeFw0xMTAxMjUwMDAwMDBaFw0xMzAyMDUyMzU5NTlaMIHHMQsw
++CQYDVQQGEwJGUjEOMAwGA1UEERMFMTQwMDAxETAPBgNVBAgTCENhbHZhZG9zMQ0w
++CwYDVQQHEwRDQUVOMRswGQYDVQQJExIyMiBydWUgZGUgQnJldGFnbmUxFTATBgNV
++BAoTDFRCUyBJTlRFUk5FVDEXMBUGA1UECxMOMDAwMiA0NDA0NDM4MTAxHTAbBgNV
++BAsTFENlcnRpZmljYXRzIFRCUyBYNTA5MRowGAYDVQQDExFlY29tLnRicy14NTA5
++LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKRrlHUnJ++1lpcg
++jtYco7cdmRe+EEfTmwPfCdfV3G1QfsTSvY6FfMpm/83pqHfT+4ANwr18wD9ZrAEN
++G16mf9VdCGK12+TP7DmqeZyGIqlFFoahQnmb8EarvE43/1UeQ2CV9XmzwZvpqeli
++LfXsFonawrY3H6ZnMwS64St61Z+9gdyuZ/RbsoZBbT5KUjDEG844QRU4OT1IGeEI
++eY5NM5RNIh6ZNhVtqeeCxMS7afONkHQrOco73RdSTRck/Hj96Ofl3MHNHryr+AMK
++DGFk1kLCZGpPdXtkxXvaDeQoiYDlil26CWc+YK6xyDPMdsWvoG14ZLyCpzMXA7/7
++4YAQRH0CAwEAAaOCAjAwggIsMB8GA1UdIwQYMBaAFBoJBMz5CY+7HqDO1KQUf0vV
++I1jNMB0GA1UdDgQWBBQgOU8HsWzbmD4WZP5Wtdw7jca2WDAOBgNVHQ8BAf8EBAMC
++BaAwDAYDVR0TAQH/BAIwADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw
++TAYDVR0gBEUwQzBBBgsrBgEEAYDlNwIBATAyMDAGCCsGAQUFBwIBFiRodHRwczov
++L3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL0NQUzEwdwYDVR0fBHAwbjA3oDWgM4Yx
++aHR0cDovL2NybC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNy
++bDAzoDGgL4YtaHR0cDovL2NybC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5l
++c3MuY3JsMIGwBggrBgEFBQcBAQSBozCBoDA9BggrBgEFBQcwAoYxaHR0cDovL2Ny
++dC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNydDA5BggrBgEF
++BQcwAoYtaHR0cDovL2NydC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5lc3Mu
++Y3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wMwYDVR0R
++BCwwKoIRZWNvbS50YnMteDUwOS5jb22CFXd3dy5lY29tLnRicy14NTA5LmNvbTAN
++BgkqhkiG9w0BAQUFAAOCAQEArT4NHfbY87bGAw8lPV4DmHlmuDuVp/y7ltO3Ynse
++3Rz8RxW2AzuO0Oy2F0Cu4yWKtMyEyMXyHqWtae7ElRbdTu5w5GwVBLJHClCzC8S9
++SpgMMQTx3Rgn8vjkHuU9VZQlulZyiPK7yunjc7c310S9FRZ7XxOwf8Nnx4WnB+No
++WrfApzhhQl31w+RyrNxZe58hCfDDHmevRvwLjQ785ZoQXJDj2j3qAD4aI2yB8lB5
++oaE1jlCJzC7Kmz/Y9jzfmv/zAs1LQTm9ktevv4BTUFaGjv9jxnQ1xnS862ZiouLW
++zZYIlYPf4F6JjXGiIQgQRglILUfq3ftJd9/ok9W9ZF8h8w==
++-----END CERTIFICATE-----
++ 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business
++ i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root
++-----BEGIN CERTIFICATE-----
++MIIFPzCCBCegAwIBAgIQDlBz/++iRSmLDeVRHT/hADANBgkqhkiG9w0BAQUFADBv
++MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk
++ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF
++eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDcwOTE4MTkyMlow
++gckxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl
+ bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u
+ ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv
+-cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg
+-Q0EgU0dDMB4XDTEwMDIxODAwMDAwMFoXDTEyMDIxOTIzNTk1OVowgcsxCzAJBgNV
+-BAYTAkZSMQ4wDAYDVQQREwUxNDAwMDERMA8GA1UECBMIQ2FsdmFkb3MxDTALBgNV
+-BAcTBENBRU4xGzAZBgNVBAkTEjIyIHJ1ZSBkZSBCcmV0YWduZTEVMBMGA1UEChMM
+-VEJTIElOVEVSTkVUMRcwFQYDVQQLEw4wMDAyIDQ0MDQ0MzgxMDEbMBkGA1UECxMS
+-c2hhLTI1NiBwcm9kdWN0aW9uMSAwHgYDVQQDExdzaGEyNTYudGJzLWludGVybmV0
+-LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbuM8VT7f0nntwu
+-N3F7v9KIBlhKNAxqCrziOXU5iqUt8HrQB3DtHbdmII+CpVUlwlmepsx6G+srEZ9a
+-MIGAy0nxi5aLb7watkyIdPjJTMvTUBQ/+RPWzt5JtYbbY9BlJ+yci0dctP74f4NU
+-ISLtlrEjUbf2gTohLrcE01TfmOF6PDEbB5PKDi38cB3NzKfizWfrOaJW6Q1C1qOJ
+-y4/4jkUREX1UFUIxzx7v62VfjXSGlcjGpBX1fvtABQOSLeE0a6gciDZs1REqroFf
+-5eXtqYphpTa14Z83ITXMfgg5Nze1VtMnzI9Qx4blYBw4dgQVEuIsYr7FDBOITDzc
+-VEVXZx0CAwEAAaOCAj8wggI7MB8GA1UdIwQYMBaAFAdEdoWTKLx/bXjSCuv6TEvf
+-2YIfMB0GA1UdDgQWBBSJKI/AYVI9RQNY0QPIqc8ej2QivTAOBgNVHQ8BAf8EBAMC
+-BaAwDAYDVR0TAQH/BAIwADA0BgNVHSUELTArBggrBgEFBQcDAQYIKwYBBQUHAwIG
+-CisGAQQBgjcKAwMGCWCGSAGG+EIEATBMBgNVHSAERTBDMEEGCysGAQQBgOU3AgQB
+-MDIwMAYIKwYBBQUHAgEWJGh0dHBzOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0Ev
+-Q1BTNDBtBgNVHR8EZjBkMDKgMKAuhixodHRwOi8vY3JsLnRicy1pbnRlcm5ldC5j
+-b20vVEJTWDUwOUNBU0dDLmNybDAuoCygKoYoaHR0cDovL2NybC50YnMteDUwOS5j
+-b20vVEJTWDUwOUNBU0dDLmNybDCBpgYIKwYBBQUHAQEEgZkwgZYwOAYIKwYBBQUH
+-MAKGLGh0dHA6Ly9jcnQudGJzLWludGVybmV0LmNvbS9UQlNYNTA5Q0FTR0MuY3J0
+-MDQGCCsGAQUFBzAChihodHRwOi8vY3J0LnRicy14NTA5LmNvbS9UQlNYNTA5Q0FT
+-R0MuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wPwYD
+-VR0RBDgwNoIXc2hhMjU2LnRicy1pbnRlcm5ldC5jb22CG3d3dy5zaGEyNTYudGJz
+-LWludGVybmV0LmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAA5NL0D4QSqhErhlkdPmz
+-XtiMvdGL+ZehM4coTRIpasM/Agt36Rc0NzCvnQwKE+wkngg1Gy2qe7Q0E/ziqBtB
+-fZYzdVgu1zdiL4kTaf+wFKYAFGsFbyeEmXysy+CMwaNoF2vpSjCU1UD56bEnTX/W
+-fxVZYxtBQUpnu2wOsm8cDZuZRv9XrYgAhGj9Tt6F0aVHSDGn59uwShG1+BVF/uju
+-SCyPTTjL1oc7YElJUzR/x4mQJYvtQI8gDIDAGEOs7v3R/gKa5EMfbUQUI4C84UbI
+-Yz09Jdnws/MkC/Hm1BZEqk89u7Hvfv+oHqEb0XaUo0TDfsxE0M1sMdnLb91QNQBm
+-UQ==
+------END CERTIFICATE-----
+- 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC
+- i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root
+------BEGIN CERTIFICATE-----
+-MIIFVjCCBD6gAwIBAgIQXpDZ0ETJMV02WTx3GTnhhTANBgkqhkiG9w0BAQUFADBv
+-MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk
+-ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF
+-eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDYyNDE5MDYzMFow
+-gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl
+-bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u
+-ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv
+-cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg
+-Q0EgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsgOkO3f7wzN6
+-rOjg45tR5vjBfzK7qmV9IBxb/QW9EEXxG+E7FNhZqQLtwGBKoSsHTnQqV75wWMk0
+-9tinWvftBkSpj5sTi/8cbzJfUvTSVYh3Qxv6AVVjMMH/ruLjE6y+4PoaPs8WoYAQ
+-ts5R4Z1g8c/WnTepLst2x0/Wv7GmuoQi+gXvHU6YrBiu7XkeYhzc95QdviWSJRDk
+-owhb5K43qhcvjRmBfO/paGlCliDGZp8mHwrI21mwobWpVjTxZRwYO3bd4+TGcI4G
+-Ie5wmHwE8F7SK1tgSqbBacKjDa93j7txKkfz/Yd2n7TGqOXiHPsJpG655vrKtnXk
+-9vs1zoDeJQIDAQABo4IBljCCAZIwHQYDVR0OBBYEFAdEdoWTKLx/bXjSCuv6TEvf
+-2YIfMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMCAGA1UdJQQZ
+-MBcGCisGAQQBgjcKAwMGCWCGSAGG+EIEATAYBgNVHSAEETAPMA0GCysGAQQBgOU3
+-AgQBMHsGA1UdHwR0MHIwOKA2oDSGMmh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL0Fk
+-ZFRydXN0RXh0ZXJuYWxDQVJvb3QuY3JsMDagNKAyhjBodHRwOi8vY3JsLmNvbW9k
+-by5uZXQvQWRkVHJ1c3RFeHRlcm5hbENBUm9vdC5jcmwwgYAGCCsGAQUFBwEBBHQw
+-cjA4BggrBgEFBQcwAoYsaHR0cDovL2NydC5jb21vZG9jYS5jb20vQWRkVHJ1c3RV
+-VE5TR0NDQS5jcnQwNgYIKwYBBQUHMAKGKmh0dHA6Ly9jcnQuY29tb2RvLm5ldC9B
+-ZGRUcnVzdFVUTlNHQ0NBLmNydDARBglghkgBhvhCAQEEBAMCAgQwDQYJKoZIhvcN
+-AQEFBQADggEBAK2zEzs+jcIrVK9oDkdDZNvhuBYTdCfpxfFs+OAujW0bIfJAy232
+-euVsnJm6u/+OrqKudD2tad2BbejLLXhMZViaCmK7D9nrXHx4te5EP8rL19SUVqLY
+-1pTnv5dhNgEgvA7n5lIzDSYs7yRLsr7HJsYPr6SeYSuZizyX1SNz7ooJ32/F3X98
+-RB0Mlc/E0OyOrkQ9/y5IrnpnaSora8CnUrV5XNOg+kyCz9edCyx4D5wXYcwZPVWz
+-8aDqquESrezPyjtfi4WRO4s/VD3HLZvOxzMrWAVYCDG9FxaOhF0QGuuG1F7F3GKV
+-v6prNyCl016kRl2j1UT+a7gLd8fA25A4C9E=
++cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEdMBsGA1UEAxMUVEJTIFg1MDkg
++Q0EgYnVzaW5lc3MwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDB1PAU
++qudCcz3tmyGcf+u6EkZqonKKHrV4gZYbvVkIRojmmlhfi/jwvpHvo8bqSt/9Rj5S
++jhCDW0pcbI+IPPtD1Jy+CHNSfnMqVDy6CKQ3p5maTzCMG6ZT+XjnvcND5v+FtaiB
++xk1iCX6uvt0jeUtdZvYbyytsSDE6c3Y5//wRxOF8tM1JxibwO3pyER26jbbN2gQz
++m/EkdGjLdJ4svPk23WDAvQ6G0/z2LcAaJB+XLfqRwfQpHQvfKa1uTi8PivC8qtip
++rmNQMMPMjxSK2azX8cKjjTDJiUKaCb4VHlJDWKEsCFRpgJAoAuX8f7Yfs1M4esGo
++sWb3PGspK3O22uIlAgMBAAGjggF6MIIBdjAdBgNVHQ4EFgQUGgkEzPkJj7seoM7U
++pBR/S9UjWM0wDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwGAYD
++VR0gBBEwDzANBgsrBgEEAYDlNwIBATB7BgNVHR8EdDByMDigNqA0hjJodHRwOi8v
++Y3JsLmNvbW9kb2NhLmNvbS9BZGRUcnVzdEV4dGVybmFsQ0FSb290LmNybDA2oDSg
++MoYwaHR0cDovL2NybC5jb21vZG8ubmV0L0FkZFRydXN0RXh0ZXJuYWxDQVJvb3Qu
++Y3JsMIGGBggrBgEFBQcBAQR6MHgwOwYIKwYBBQUHMAKGL2h0dHA6Ly9jcnQuY29t
++b2RvY2EuY29tL0FkZFRydXN0VVROU2VydmVyQ0EuY3J0MDkGCCsGAQUFBzAChi1o
++dHRwOi8vY3J0LmNvbW9kby5uZXQvQWRkVHJ1c3RVVE5TZXJ2ZXJDQS5jcnQwEQYJ
++YIZIAYb4QgEBBAQDAgIEMA0GCSqGSIb3DQEBBQUAA4IBAQA7mqrMgk/MrE6QnbNA
++h4nRCn2ti4bg4w2C3lB6bSvRPnYwuNw9Jb8vuKkNFzRDxNJXqVDZdfFW5CVQJuyd
++nfAx83+wk+spzvFaE1KhFYfN9G9pQfXUfvDRoIcJgPEKUXL1wRiOG+IjU3VVI8pg
++IgqHkr7ylln5i5zCiFAPuIJmYUSFg/gxH5xkCNcjJqqrHrHatJr6Qrrke93joupw
++oU1njfAcZtYp6fbiK6u2b1pJqwkVBE8RsfLnPhRj+SFbpvjv8Od7o/ieJhFIYQNU
++k2jX2u8qZnAiNw93LZW9lpYjtuvMXq8QQppENNja5b53q7UwI+lU7ZGjZ7quuESp
++J6/5
+ -----END CERTIFICATE-----
+ 2 s:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root
+- i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC
++ i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware
+ -----BEGIN CERTIFICATE-----
+-MIIEZjCCA06gAwIBAgIQUSYKkxzif5zDpV954HKugjANBgkqhkiG9w0BAQUFADCB
+-kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
++MIIETzCCAzegAwIBAgIQHM5EYpUZep1jUvnyI6m2mDANBgkqhkiG9w0BAQUFADCB
++lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
+ Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
+-dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw
+-IFNHQzAeFw0wNTA2MDcwODA5MTBaFw0xOTA2MjQxOTA2MzBaMG8xCzAJBgNVBAYT
+-AlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0
+-ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB
+-IFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC39xoz5vIABC05
+-4E5b7R+8bA/Ntfojts7emxEzl6QpTH2Tn71KvJPtAxrjj8/lbVBa1pcplFqAsEl6
+-2y6V/bjKvzc4LR4+kUGtcFbH8E8/6DKedMrIkFTpxl8PeJ2aQDwOrGGqXhSPnoeh
+-alDc15pOrwWzpnGUnHGzUGAKxxOdOAeGAqjpqGkmGJCrTLBPI6s6T4TY386f4Wlv
+-u9dC12tE5Met7m1BX3JacQg3s3llpFmglDf3AC8NwpJy2tA4ctsUqEXEXSp9t7TW
+-xO6szRNEt8kr3UMAJfphuWlqWCMRt6czj1Z1WfXNKddGtworZbbTQm8Vsrh7++/p
+-XVPVNFonAgMBAAGjgdgwgdUwHwYDVR0jBBgwFoAUUzLRs89/+uDxoF2FTpLSnkUd
+-tE8wHQYDVR0OBBYEFK29mHo0tCb3+sQmVO8DveAky1QaMA4GA1UdDwEB/wQEAwIB
+-BjAPBgNVHRMBAf8EBTADAQH/MBEGCWCGSAGG+EIBAQQEAwIBAjAgBgNVHSUEGTAX
+-BgorBgEEAYI3CgMDBglghkgBhvhCBAEwPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDov
+-L2NybC51c2VydHJ1c3QuY29tL1VUTi1EQVRBQ29ycFNHQy5jcmwwDQYJKoZIhvcN
+-AQEFBQADggEBAMbuUxdoFLJRIh6QWA2U/b3xcOWGLcM2MY9USEbnLQg3vGwKYOEO
+-rVE04BKT6b64q7gmtOmWPSiPrmQH/uAB7MXjkesYoPF1ftsK5p+R26+udd8jkWjd
+-FwBaS/9kbHDrARrQkNnHptZt9hPk/7XJ0h4qy7ElQyZ42TCbTg0evmnv3+r+LbPM
+-+bDdtRTKkdSytaX7ARmjR3mfnYyVhzT4HziS2jamEfpr62vp3EV4FTkG101B5CHI
+-3C+H0be/SGB1pWLLJN47YaApIKa+xWycxOkKaSLvkTr6Jq/RW0GnOuL4OAdCq8Fb
+-+M5tug8EPzI0rNwEKNdwMBQmBsTkm5jVz3g=
++dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt
++SGFyZHdhcmUwHhcNMDUwNjA3MDgwOTEwWhcNMTkwNzA5MTgxOTIyWjBvMQswCQYD
++VQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0
++IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5h
++bCBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt/caM+by
++AAQtOeBOW+0fvGwPzbX6I7bO3psRM5ekKUx9k5+9SryT7QMa44/P5W1QWtaXKZRa
++gLBJetsulf24yr83OC0ePpFBrXBWx/BPP+gynnTKyJBU6cZfD3idmkA8Dqxhql4U
++j56HoWpQ3NeaTq8Fs6ZxlJxxs1BgCscTnTgHhgKo6ahpJhiQq0ywTyOrOk+E2N/O
++n+Fpb7vXQtdrROTHre5tQV9yWnEIN7N5ZaRZoJQ39wAvDcKSctrQOHLbFKhFxF0q
++fbe01sTurM0TRLfJK91DACX6YblpalgjEbenM49WdVn1zSnXRrcKK2W200JvFbK4
++e/vv6V1T1TRaJwIDAQABo4G9MIG6MB8GA1UdIwQYMBaAFKFyXyYbKJhDlV0HN9WF
++lp1L0sNFMB0GA1UdDgQWBBStvZh6NLQm9/rEJlTvA73gJMtUGjAOBgNVHQ8BAf8E
++BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zARBglghkgBhvhCAQEEBAMCAQIwRAYDVR0f
++BD0wOzA5oDegNYYzaHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmly
++c3QtSGFyZHdhcmUuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQByQhANOs4kClrwF8BW
++onvUOGCSjRK52zYZgDXYNjDtmr5rJ6NyPFDNn+JxkLpjYetIFMTbSRe679Bt8m7a
++gIAoQYFQtxMuyLnJegB2aEbQiIxh/tC21UcFF7ktdnDoTlA6w3pLuvunaI84Of3o
++2YBrhzkTbCfaYk5JRlTpudW9DkUkHBsyx3nknPKnplkIGaK0jgn8E0n+SFabYaHk
++I9LroYT/+JtLefh9lgBdAgVv0UPbzoGfuDsrk/Zh+UrgbLFpHoVnElhzbkh64Z0X
++OGaJunQc68cCZu5HTn/aK7fBGMcVflRCXLVEQpU9PIAdGA8Ynvg684t8GMaKsRl1
++jIGZ
+ -----END CERTIFICATE-----
+- 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC
+- i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC
++ 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware
++ i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware
+ -----BEGIN CERTIFICATE-----
+-MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB
+-kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
++MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB
++lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
+ Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
+-dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw
+-IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG
+-EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD
+-VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu
+-dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN
+-BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6
+-E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ
+-D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK
+-4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq
+-lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW
+-bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB
+-o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT
+-MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js
+-LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr
+-BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB
+-AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft
+-Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj
+-j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH
+-KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv
+-2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3
+-mfnGV/TJVTl4uix5yaaIK/QI
++dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt
++SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG
++A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe
++MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v
++d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh
++cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn
++0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ
++M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a
++MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd
++oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI
++DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy
++oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD
++VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0
++dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy
++bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF
++BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM
++//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli
++CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE
++CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t
++3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS
++KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA==
+ -----END CERTIFICATE-----
+diff -r 70274d53c1dd Lib/test/symlink_support.py
+--- /dev/null
++++ b/Lib/test/symlink_support.py
+@@ -0,0 +1,100 @@
++import os
++import unittest
++import platform
++
++from test.test_support import TESTFN
++
++def can_symlink():
++ # cache the result in can_symlink.prev_val
++ prev_val = getattr(can_symlink, 'prev_val', None)
++ if prev_val is not None:
++ return prev_val
++ symlink_path = TESTFN + "can_symlink"
++ try:
++ symlink(TESTFN, symlink_path)
++ can = True
++ except (OSError, NotImplementedError, AttributeError):
++ can = False
++ else:
++ os.remove(symlink_path)
++ can_symlink.prev_val = can
++ return can
++
++def skip_unless_symlink(test):
++ """Skip decorator for tests that require functional symlink"""
++ ok = can_symlink()
++ msg = "Requires functional symlink implementation"
++ return test if ok else unittest.skip(msg)(test)
++
++def _symlink_win32(target, link, target_is_directory=False):
++ """
++ Ctypes symlink implementation since Python doesn't support
++ symlinks in windows yet. Borrowed from jaraco.windows project.
++ """
++ import ctypes.wintypes
++ CreateSymbolicLink = ctypes.windll.kernel32.CreateSymbolicLinkW
++ CreateSymbolicLink.argtypes = (
++ ctypes.wintypes.LPWSTR,
++ ctypes.wintypes.LPWSTR,
++ ctypes.wintypes.DWORD,
++ )
++ CreateSymbolicLink.restype = ctypes.wintypes.BOOLEAN
++
++ def format_system_message(errno):
++ """
++ Call FormatMessage with a system error number to retrieve
++ the descriptive error message.
++ """
++ # first some flags used by FormatMessageW
++ ALLOCATE_BUFFER = 0x100
++ ARGUMENT_ARRAY = 0x2000
++ FROM_HMODULE = 0x800
++ FROM_STRING = 0x400
++ FROM_SYSTEM = 0x1000
++ IGNORE_INSERTS = 0x200
++
++ # Let FormatMessageW allocate the buffer (we'll free it below)
++ # Also, let it know we want a system error message.
++ flags = ALLOCATE_BUFFER | FROM_SYSTEM
++ source = None
++ message_id = errno
++ language_id = 0
++ result_buffer = ctypes.wintypes.LPWSTR()
++ buffer_size = 0
++ arguments = None
++ bytes = ctypes.windll.kernel32.FormatMessageW(
++ flags,
++ source,
++ message_id,
++ language_id,
++ ctypes.byref(result_buffer),
++ buffer_size,
++ arguments,
++ )
++ # note the following will cause an infinite loop if GetLastError
++ # repeatedly returns an error that cannot be formatted, although
++ # this should not happen.
++ handle_nonzero_success(bytes)
++ message = result_buffer.value
++ ctypes.windll.kernel32.LocalFree(result_buffer)
++ return message
++
++ def handle_nonzero_success(result):
++ if result == 0:
++ value = ctypes.windll.kernel32.GetLastError()
++ strerror = format_system_message(value)
++ raise WindowsError(value, strerror)
++
++ target_is_directory = target_is_directory or os.path.isdir(target)
++ handle_nonzero_success(CreateSymbolicLink(link, target, target_is_directory))
++
++symlink = os.symlink if hasattr(os, 'symlink') else (
++ _symlink_win32 if platform.system() == 'Windows' else None
++)
++
++def remove_symlink(name):
++ # On Windows, to remove a directory symlink, one must use rmdir
++ try:
++ os.rmdir(name)
++ except OSError:
++ os.remove(name)
+diff -r 70274d53c1dd Lib/test/test_argparse.py
+--- a/Lib/test/test_argparse.py
++++ b/Lib/test/test_argparse.py
+@@ -1374,6 +1374,7 @@
+ ('X @hello', NS(a=None, x='X', y=['hello world!'])),
+ ('-a B @recursive Y Z', NS(a='A', x='hello world!', y=['Y', 'Z'])),
+ ('X @recursive Z -a B', NS(a='B', x='X', y=['hello world!', 'Z'])),
++ (["-a", "", "X", "Y"], NS(a='', x='X', y=['Y'])),
+ ]
+
+
+@@ -1763,6 +1764,14 @@
+ parser2.add_argument('-y', choices='123', help='y help')
+ parser2.add_argument('z', type=complex, nargs='*', help='z help')
+
++ # add third sub-parser
++ parser3_kwargs = dict(description='3 description')
++ if subparser_help:
++ parser3_kwargs['help'] = '3 help'
++ parser3 = subparsers.add_parser('3', **parser3_kwargs)
++ parser3.add_argument('t', type=int, help='t help')
++ parser3.add_argument('u', nargs='...', help='u help')
++
+ # return the main parser
+ return parser
+
+@@ -1792,6 +1801,10 @@
+ self.parser.parse_args('--foo 0.125 1 c'.split()),
+ NS(foo=True, bar=0.125, w=None, x='c'),
+ )
++ self.assertEqual(
++ self.parser.parse_args('-1.5 3 11 -- a --foo 7 -- b'.split()),
++ NS(foo=False, bar=-1.5, t=11, u=['a', '--foo', '7', '--', 'b']),
++ )
+
+ def test_parse_known_args(self):
+ self.assertEqual(
+@@ -1826,15 +1839,15 @@
+
+ def test_help(self):
+ self.assertEqual(self.parser.format_usage(),
+- 'usage: PROG [-h] [--foo] bar {1,2} ...\n')
++ 'usage: PROG [-h] [--foo] bar {1,2,3} ...\n')
+ self.assertEqual(self.parser.format_help(), textwrap.dedent('''\
+- usage: PROG [-h] [--foo] bar {1,2} ...
++ usage: PROG [-h] [--foo] bar {1,2,3} ...
+
+ main description
+
+ positional arguments:
+ bar bar help
+- {1,2} command help
++ {1,2,3} command help
+
+ optional arguments:
+ -h, --help show this help message and exit
+@@ -1845,15 +1858,15 @@
+ # Make sure - is still used for help if it is a non-first prefix char
+ parser = self._get_parser(prefix_chars='+:-')
+ self.assertEqual(parser.format_usage(),
+- 'usage: PROG [-h] [++foo] bar {1,2} ...\n')
++ 'usage: PROG [-h] [++foo] bar {1,2,3} ...\n')
+ self.assertEqual(parser.format_help(), textwrap.dedent('''\
+- usage: PROG [-h] [++foo] bar {1,2} ...
++ usage: PROG [-h] [++foo] bar {1,2,3} ...
+
+ main description
+
+ positional arguments:
+ bar bar help
+- {1,2} command help
++ {1,2,3} command help
+
+ optional arguments:
+ -h, --help show this help message and exit
+@@ -1864,15 +1877,15 @@
+ def test_help_alternate_prefix_chars(self):
+ parser = self._get_parser(prefix_chars='+:/')
+ self.assertEqual(parser.format_usage(),
+- 'usage: PROG [+h] [++foo] bar {1,2} ...\n')
++ 'usage: PROG [+h] [++foo] bar {1,2,3} ...\n')
+ self.assertEqual(parser.format_help(), textwrap.dedent('''\
+- usage: PROG [+h] [++foo] bar {1,2} ...
++ usage: PROG [+h] [++foo] bar {1,2,3} ...
+
+ main description
+
+ positional arguments:
+ bar bar help
+- {1,2} command help
++ {1,2,3} command help
+
+ optional arguments:
+ +h, ++help show this help message and exit
+@@ -1881,18 +1894,19 @@
+
+ def test_parser_command_help(self):
+ self.assertEqual(self.command_help_parser.format_usage(),
+- 'usage: PROG [-h] [--foo] bar {1,2} ...\n')
++ 'usage: PROG [-h] [--foo] bar {1,2,3} ...\n')
+ self.assertEqual(self.command_help_parser.format_help(),
+ textwrap.dedent('''\
+- usage: PROG [-h] [--foo] bar {1,2} ...
++ usage: PROG [-h] [--foo] bar {1,2,3} ...
+
+ main description
+
+ positional arguments:
+ bar bar help
+- {1,2} command help
++ {1,2,3} command help
+ 1 1 help
+ 2 2 help
++ 3 3 help
+
+ optional arguments:
+ -h, --help show this help message and exit
+diff -r 70274d53c1dd Lib/test/test_array.py
+--- a/Lib/test/test_array.py
++++ b/Lib/test/test_array.py
+@@ -985,6 +985,19 @@
+ upper = long(pow(2, a.itemsize * 8)) - 1L
+ self.check_overflow(lower, upper)
+
++ @test_support.cpython_only
++ def test_sizeof_with_buffer(self):
++ a = array.array(self.typecode, self.example)
++ basesize = test_support.calcvobjsize('4P')
++ buffer_size = a.buffer_info()[1] * a.itemsize
++ test_support.check_sizeof(self, a, basesize + buffer_size)
++
++ @test_support.cpython_only
++ def test_sizeof_without_buffer(self):
++ a = array.array(self.typecode)
++ basesize = test_support.calcvobjsize('4P')
++ test_support.check_sizeof(self, a, basesize)
++
+
+ class ByteTest(SignedNumberTest):
+ typecode = 'b'
+diff -r 70274d53c1dd Lib/test/test_ast.py
+--- a/Lib/test/test_ast.py
++++ b/Lib/test/test_ast.py
+@@ -231,6 +231,12 @@
+ im = ast.parse("from . import y").body[0]
+ self.assertIsNone(im.module)
+
++ def test_non_interned_future_from_ast(self):
++ mod = ast.parse("from __future__ import division")
++ self.assertIsInstance(mod.body[0], ast.ImportFrom)
++ mod.body[0].module = " __future__ ".strip()
++ compile(mod, "<test>", "exec")
++
+ def test_base_classes(self):
+ self.assertTrue(issubclass(ast.For, ast.stmt))
+ self.assertTrue(issubclass(ast.Name, ast.expr))
+diff -r 70274d53c1dd Lib/test/test_asyncore.py
+--- a/Lib/test/test_asyncore.py
++++ b/Lib/test/test_asyncore.py
+@@ -7,6 +7,7 @@
+ import time
+ import warnings
+ import errno
++import struct
+
+ from test import test_support
+ from test.test_support import TESTFN, run_unittest, unlink
+@@ -703,6 +704,26 @@
+ finally:
+ sock.close()
+
++ @unittest.skipUnless(threading, 'Threading required for this test.')
++ @test_support.reap_threads
++ def test_quick_connect(self):
++ # see: http://bugs.python.org/issue10340
++ server = TCPServer()
++ t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1, count=500))
++ t.start()
++
++ for x in xrange(20):
++ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
++ s.settimeout(.2)
++ s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
++ struct.pack('ii', 1, 0))
++ try:
++ s.connect(server.address)
++ except socket.error:
++ pass
++ finally:
++ s.close()
++
+
+ class TestAPI_UseSelect(BaseTestAPI):
+ use_poll = False
+diff -r 70274d53c1dd Lib/test/test_bigmem.py
+--- a/Lib/test/test_bigmem.py
++++ b/Lib/test/test_bigmem.py
+@@ -118,12 +118,13 @@
+ except MemoryError:
+ pass # acceptable on 32-bit
+
+- @precisionbigmemtest(size=_2G-1, memuse=2)
++ @precisionbigmemtest(size=_2G-1, memuse=4)
+ def test_decodeascii(self, size):
+ return self.basic_encode_test(size, 'ascii', c='A')
+
+ @precisionbigmemtest(size=_4G // 5, memuse=6+2)
+ def test_unicode_repr_oflw(self, size):
++ self.skipTest("test crashes - see issue #14904")
+ try:
+ s = u"\uAAAA"*size
+ r = repr(s)
+@@ -485,7 +486,7 @@
+ self.assertEqual(s.count('.'), 3)
+ self.assertEqual(s.count('-'), size * 2)
+
+- @bigmemtest(minsize=_2G + 10, memuse=2)
++ @bigmemtest(minsize=_2G + 10, memuse=5)
+ def test_repr_small(self, size):
+ s = '-' * size
+ s = repr(s)
+@@ -497,7 +498,6 @@
+ # repr() will create a string four times as large as this 'binary
+ # string', but we don't want to allocate much more than twice
+ # size in total. (We do extra testing in test_repr_large())
+- size = size // 5 * 2
+ s = '\x00' * size
+ s = repr(s)
+ self.assertEqual(len(s), size * 4 + 2)
+@@ -541,7 +541,7 @@
+ self.assertEqual(len(s), size * 2)
+ self.assertEqual(s.count('.'), size * 2)
+
+- @bigmemtest(minsize=_2G + 20, memuse=1)
++ @bigmemtest(minsize=_2G + 20, memuse=2)
+ def test_slice_and_getitem(self, size):
+ SUBSTR = '0123456789'
+ sublen = len(SUBSTR)
+diff -r 70274d53c1dd Lib/test/test_bisect.py
+--- a/Lib/test/test_bisect.py
++++ b/Lib/test/test_bisect.py
+@@ -23,6 +23,28 @@
+ import bisect as c_bisect
+
+
++class Range(object):
++ """A trivial xrange()-like object without any integer width limitations."""
++ def __init__(self, start, stop):
++ self.start = start
++ self.stop = stop
++ self.last_insert = None
++
++ def __len__(self):
++ return self.stop - self.start
++
++ def __getitem__(self, idx):
++ n = self.stop - self.start
++ if idx < 0:
++ idx += n
++ if idx >= n:
++ raise IndexError(idx)
++ return self.start + idx
++
++ def insert(self, idx, item):
++ self.last_insert = idx, item
++
++
+ class TestBisect(unittest.TestCase):
+ module = None
+
+@@ -122,6 +144,35 @@
+ self.assertRaises(ValueError, mod.insort_left, [1, 2, 3], 5, -1, 3),
+ self.assertRaises(ValueError, mod.insort_right, [1, 2, 3], 5, -1, 3),
+
++ def test_large_range(self):
++ # Issue 13496
++ mod = self.module
++ n = sys.maxsize
++ try:
++ data = xrange(n-1)
++ except OverflowError:
++ self.skipTest("can't create a xrange() object of size `sys.maxsize`")
++ self.assertEqual(mod.bisect_left(data, n-3), n-3)
++ self.assertEqual(mod.bisect_right(data, n-3), n-2)
++ self.assertEqual(mod.bisect_left(data, n-3, n-10, n), n-3)
++ self.assertEqual(mod.bisect_right(data, n-3, n-10, n), n-2)
++
++ def test_large_pyrange(self):
++ # Same as above, but without C-imposed limits on range() parameters
++ mod = self.module
++ n = sys.maxsize
++ data = Range(0, n-1)
++ self.assertEqual(mod.bisect_left(data, n-3), n-3)
++ self.assertEqual(mod.bisect_right(data, n-3), n-2)
++ self.assertEqual(mod.bisect_left(data, n-3, n-10, n), n-3)
++ self.assertEqual(mod.bisect_right(data, n-3, n-10, n), n-2)
++ x = n - 100
++ mod.insort_left(data, x, x - 50, x + 50)
++ self.assertEqual(data.last_insert, (x, x))
++ x = n - 200
++ mod.insort_right(data, x, x - 50, x + 50)
++ self.assertEqual(data.last_insert, (x + 1, x))
++
+ def test_random(self, n=25):
+ from random import randrange
+ for i in xrange(n):
+diff -r 70274d53c1dd Lib/test/test_cgi.py
+--- a/Lib/test/test_cgi.py
++++ b/Lib/test/test_cgi.py
+@@ -120,6 +120,11 @@
+
+ class CgiTests(unittest.TestCase):
+
++ def test_escape(self):
++ self.assertEqual("test &amp; string", cgi.escape("test & string"))
++ self.assertEqual("&lt;test string&gt;", cgi.escape("<test string>"))
++ self.assertEqual("&quot;test string&quot;", cgi.escape('"test string"', True))
++
+ def test_strict(self):
+ for orig, expect in parse_strict_test_cases:
+ # Test basic parsing
+diff -r 70274d53c1dd Lib/test/test_class.py
+--- a/Lib/test/test_class.py
++++ b/Lib/test/test_class.py
+@@ -628,6 +628,13 @@
+ a = A(hash(A.f.im_func)^(-1))
+ hash(a.f)
+
++ def testAttrSlots(self):
++ class C:
++ pass
++ for c in C, C():
++ self.assertRaises(TypeError, type(c).__getattribute__, c, [])
++ self.assertRaises(TypeError, type(c).__setattr__, c, [], [])
++
+ def test_main():
+ with test_support.check_py3k_warnings(
+ (".+__(get|set|del)slice__ has been removed", DeprecationWarning),
+diff -r 70274d53c1dd Lib/test/test_cmd.py
+--- a/Lib/test/test_cmd.py
++++ b/Lib/test/test_cmd.py
+@@ -84,11 +84,11 @@
+ <BLANKLINE>
+ Documented commands (type help <topic>):
+ ========================================
+- add
++ add help
+ <BLANKLINE>
+ Undocumented commands:
+ ======================
+- exit help shell
++ exit shell
+ <BLANKLINE>
+
+ Test for the function print_topics():
+@@ -125,11 +125,11 @@
+ <BLANKLINE>
+ Documented commands (type help <topic>):
+ ========================================
+- add
++ add help
+ <BLANKLINE>
+ Undocumented commands:
+ ======================
+- exit help shell
++ exit shell
+ <BLANKLINE>
+ help text for add
+ Hello from postloop
+diff -r 70274d53c1dd Lib/test/test_cmd_line_script.py
+--- a/Lib/test/test_cmd_line_script.py
++++ b/Lib/test/test_cmd_line_script.py
+@@ -6,11 +6,14 @@
+ import test.test_support
+ from test.script_helper import (run_python,
+ temp_dir, make_script, compile_script,
+- make_pkg, make_zip_script, make_zip_pkg)
++ assert_python_failure, make_pkg,
++ make_zip_script, make_zip_pkg)
+
+ verbose = test.test_support.verbose
+
+
++example_args = ['test1', 'test2', 'test3']
++
+ test_source = """\
+ # Script may be run with optimisation enabled, so don't rely on assert
+ # statements being executed
+@@ -204,6 +207,19 @@
+ launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
+ self._check_import_error(launch_name, msg)
+
++ def test_dash_m_error_code_is_one(self):
++ # If a module is invoked with the -m command line flag
++ # and results in an error that the return code to the
++ # shell is '1'
++ with temp_dir() as script_dir:
++ pkg_dir = os.path.join(script_dir, 'test_pkg')
++ make_pkg(pkg_dir)
++ script_name = _make_test_script(pkg_dir, 'other', "if __name__ == '__main__': raise ValueError")
++ rc, out, err = assert_python_failure('-m', 'test_pkg.other', *example_args)
++ if verbose > 1:
++ print(out)
++ self.assertEqual(rc, 1)
++
+
+ def test_main():
+ test.test_support.run_unittest(CmdLineTest)
+diff -r 70274d53c1dd Lib/test/test_codecs.py
+--- a/Lib/test/test_codecs.py
++++ b/Lib/test/test_codecs.py
+@@ -495,7 +495,19 @@
+ )
+
+ def test_errors(self):
+- self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode, "\xff", "strict", True)
++ tests = [
++ (b'\xff', u'\ufffd'),
++ (b'A\x00Z', u'A\ufffd'),
++ (b'A\x00B\x00C\x00D\x00Z', u'ABCD\ufffd'),
++ (b'\x00\xd8', u'\ufffd'),
++ (b'\x00\xd8A', u'\ufffd'),
++ (b'\x00\xd8A\x00', u'\ufffdA'),
++ (b'\x00\xdcA\x00', u'\ufffdA'),
++ ]
++ for raw, expected in tests:
++ self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode,
++ raw, 'strict', True)
++ self.assertEqual(raw.decode('utf-16le', 'replace'), expected)
+
+ class UTF16BETest(ReadTest):
+ encoding = "utf-16-be"
+@@ -516,7 +528,19 @@
+ )
+
+ def test_errors(self):
+- self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode, "\xff", "strict", True)
++ tests = [
++ (b'\xff', u'\ufffd'),
++ (b'\x00A\xff', u'A\ufffd'),
++ (b'\x00A\x00B\x00C\x00DZ', u'ABCD\ufffd'),
++ (b'\xd8\x00', u'\ufffd'),
++ (b'\xd8\x00\xdc', u'\ufffd'),
++ (b'\xd8\x00\x00A', u'\ufffdA'),
++ (b'\xdc\x00\x00A', u'\ufffdA'),
++ ]
++ for raw, expected in tests:
++ self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode,
++ raw, 'strict', True)
++ self.assertEqual(raw.decode('utf-16be', 'replace'), expected)
+
+ class UTF8Test(ReadTest):
+ encoding = "utf-8"
+diff -r 70274d53c1dd Lib/test/test_cookie.py
+--- a/Lib/test/test_cookie.py
++++ b/Lib/test/test_cookie.py
+@@ -64,13 +64,13 @@
+
+ # loading 'expires'
+ C = Cookie.SimpleCookie()
+- C.load('Customer="W"; expires=Wed, 01-Jan-2010 00:00:00 GMT')
++ C.load('Customer="W"; expires=Wed, 01 Jan 2010 00:00:00 GMT')
+ self.assertEqual(C['Customer']['expires'],
+- 'Wed, 01-Jan-2010 00:00:00 GMT')
++ 'Wed, 01 Jan 2010 00:00:00 GMT')
+ C = Cookie.SimpleCookie()
+- C.load('Customer="W"; expires=Wed, 01-Jan-98 00:00:00 GMT')
++ C.load('Customer="W"; expires=Wed, 01 Jan 98 00:00:00 GMT')
+ self.assertEqual(C['Customer']['expires'],
+- 'Wed, 01-Jan-98 00:00:00 GMT')
++ 'Wed, 01 Jan 98 00:00:00 GMT')
+
+ def test_extended_encode(self):
+ # Issue 9824: some browsers don't follow the standard; we now
+diff -r 70274d53c1dd Lib/test/test_deque.py
+--- a/Lib/test/test_deque.py
++++ b/Lib/test/test_deque.py
+@@ -6,6 +6,7 @@
+ import copy
+ import cPickle as pickle
+ import random
++import struct
+
+ BIG = 100000
+
+@@ -517,6 +518,21 @@
+ gc.collect()
+ self.assertTrue(ref() is None, "Cycle was not collected")
+
++ check_sizeof = test_support.check_sizeof
++
++ @test_support.cpython_only
++ def test_sizeof(self):
++ BLOCKLEN = 62
++ basesize = test_support.calcobjsize('2P4PlP')
++ blocksize = struct.calcsize('2P%dP' % BLOCKLEN)
++ self.assertEqual(object.__sizeof__(deque()), basesize)
++ check = self.check_sizeof
++ check(deque(), basesize + blocksize)
++ check(deque('a'), basesize + blocksize)
++ check(deque('a' * (BLOCKLEN // 2)), basesize + blocksize)
++ check(deque('a' * (BLOCKLEN // 2 + 1)), basesize + 2 * blocksize)
++ check(deque('a' * (42 * BLOCKLEN)), basesize + 43 * blocksize)
++
+ class TestVariousIteratorArgs(unittest.TestCase):
+
+ def test_constructor(self):
+diff -r 70274d53c1dd Lib/test/test_descr.py
+--- a/Lib/test/test_descr.py
++++ b/Lib/test/test_descr.py
+@@ -1,7 +1,9 @@
+ import __builtin__
++import gc
+ import sys
+ import types
+ import unittest
++import weakref
+
+ from copy import deepcopy
+ from test import test_support
+@@ -1127,7 +1129,6 @@
+ self.assertEqual(Counted.counter, 0)
+
+ # Test lookup leaks [SF bug 572567]
+- import gc
+ if hasattr(gc, 'get_objects'):
+ class G(object):
+ def __cmp__(self, other):
+@@ -1418,6 +1419,22 @@
+ self.assertEqual(x, spam.spamlist)
+ self.assertEqual(a, a1)
+ self.assertEqual(d, d1)
++ spam_cm = spam.spamlist.__dict__['classmeth']
++ x2, a2, d2 = spam_cm(spam.spamlist, *a, **d)
++ self.assertEqual(x2, spam.spamlist)
++ self.assertEqual(a2, a1)
++ self.assertEqual(d2, d1)
++ class SubSpam(spam.spamlist): pass
++ x2, a2, d2 = spam_cm(SubSpam, *a, **d)
++ self.assertEqual(x2, SubSpam)
++ self.assertEqual(a2, a1)
++ self.assertEqual(d2, d1)
++ with self.assertRaises(TypeError):
++ spam_cm()
++ with self.assertRaises(TypeError):
++ spam_cm(spam.spamlist())
++ with self.assertRaises(TypeError):
++ spam_cm(list)
+
+ def test_staticmethods(self):
+ # Testing static methods...
+@@ -4541,7 +4558,6 @@
+ self.assertRaises(AttributeError, getattr, C(), "attr")
+ self.assertEqual(descr.counter, 4)
+
+- import gc
+ class EvilGetattribute(object):
+ # This used to segfault
+ def __getattr__(self, name):
+@@ -4554,6 +4570,9 @@
+
+ self.assertRaises(AttributeError, getattr, EvilGetattribute(), "attr")
+
++ def test_type___getattribute__(self):
++ self.assertRaises(TypeError, type.__getattribute__, list, type)
++
+ def test_abstractmethods(self):
+ # type pretends not to have __abstractmethods__.
+ self.assertRaises(AttributeError, getattr, type, "__abstractmethods__")
+@@ -4588,7 +4607,30 @@
+ pass
+ Foo.__repr__ = Foo.__str__
+ foo = Foo()
+- str(foo)
++ self.assertRaises(RuntimeError, str, foo)
++ self.assertRaises(RuntimeError, repr, foo)
++
++ def test_mixing_slot_wrappers(self):
++ class X(dict):
++ __setattr__ = dict.__setitem__
++ x = X()
++ x.y = 42
++ self.assertEqual(x["y"], 42)
++
++ def test_cycle_through_dict(self):
++ # See bug #1469629
++ class X(dict):
++ def __init__(self):
++ dict.__init__(self)
++ self.__dict__ = self
++ x = X()
++ x.attr = 42
++ wr = weakref.ref(x)
++ del x
++ test_support.gc_collect()
++ self.assertIsNone(wr())
++ for o in gc.get_objects():
++ self.assertIsNot(type(o), X)
+
+ class DictProxyTests(unittest.TestCase):
+ def setUp(self):
+diff -r 70274d53c1dd Lib/test/test_dict.py
+--- a/Lib/test/test_dict.py
++++ b/Lib/test/test_dict.py
+@@ -299,6 +299,26 @@
+ x.fail = True
+ self.assertRaises(Exc, d.setdefault, x, [])
+
++ def test_setdefault_atomic(self):
++ # Issue #13521: setdefault() calls __hash__ and __eq__ only once.
++ class Hashed(object):
++ def __init__(self):
++ self.hash_count = 0
++ self.eq_count = 0
++ def __hash__(self):
++ self.hash_count += 1
++ return 42
++ def __eq__(self, other):
++ self.eq_count += 1
++ return id(self) == id(other)
++ hashed1 = Hashed()
++ y = {hashed1: 5}
++ hashed2 = Hashed()
++ y.setdefault(hashed2, [])
++ self.assertEqual(hashed1.hash_count, 1)
++ self.assertEqual(hashed2.hash_count, 1)
++ self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
++
+ def test_popitem(self):
+ # dict.popitem()
+ for copymode in -1, +1:
+diff -r 70274d53c1dd Lib/test/test_file2k.py
+--- a/Lib/test/test_file2k.py
++++ b/Lib/test/test_file2k.py
+@@ -2,6 +2,9 @@
+ import os
+ import unittest
+ import itertools
++import select
++import signal
++import subprocess
+ import time
+ from array import array
+ from weakref import proxy
+@@ -89,6 +92,13 @@
+ def testRepr(self):
+ # verify repr works
+ self.assertTrue(repr(self.f).startswith("<open file '" + TESTFN))
++ # see issue #14161
++ # Windows doesn't like \r\n\t" in the file name, but ' is ok
++ fname = 'xx\rxx\nxx\'xx"xx' if sys.platform != "win32" else "xx'xx"
++ with open(fname, 'w') as f:
++ self.addCleanup(os.remove, fname)
++ self.assertTrue(repr(f).startswith(
++ "<open file %r, mode 'w' at" % fname))
+
+ def testErrors(self):
+ self.f.close()
+@@ -595,6 +605,148 @@
+ self._test_close_open_io(io_func)
+
+
++@unittest.skipUnless(os.name == 'posix', 'test requires a posix system.')
++class TestFileSignalEINTR(unittest.TestCase):
++ def _test_reading(self, data_to_write, read_and_verify_code, method_name,
++ universal_newlines=False):
++ """Generic buffered read method test harness to verify EINTR behavior.
++
++ Also validates that Python signal handlers are run during the read.
++
++ Args:
++ data_to_write: String to write to the child process for reading
++ before sending it a signal, confirming the signal was handled,
++ writing a final newline char and closing the infile pipe.
++ read_and_verify_code: Single "line" of code to read from a file
++ object named 'infile' and validate the result. This will be
++ executed as part of a python subprocess fed data_to_write.
++ method_name: The name of the read method being tested, for use in
++ an error message on failure.
++ universal_newlines: If True, infile will be opened in universal
++ newline mode in the child process.
++ """
++ if universal_newlines:
++ # Test the \r\n -> \n conversion while we're at it.
++ data_to_write = data_to_write.replace('\n', '\r\n')
++ infile_setup_code = 'infile = os.fdopen(sys.stdin.fileno(), "rU")'
++ else:
++ infile_setup_code = 'infile = sys.stdin'
++ # Total pipe IO in this function is smaller than the minimum posix OS
++ # pipe buffer size of 512 bytes. No writer should block.
++ assert len(data_to_write) < 512, 'data_to_write must fit in pipe buf.'
++
++ child_code = (
++ 'import os, signal, sys ;'
++ 'signal.signal('
++ 'signal.SIGINT, lambda s, f: sys.stderr.write("$\\n")) ;'
++ + infile_setup_code + ' ;' +
++ 'assert isinstance(infile, file) ;'
++ 'sys.stderr.write("Go.\\n") ;'
++ + read_and_verify_code)
++ reader_process = subprocess.Popen(
++ [sys.executable, '-c', child_code],
++ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
++ stderr=subprocess.PIPE)
++ # Wait for the signal handler to be installed.
++ go = reader_process.stderr.read(4)
++ if go != 'Go.\n':
++ reader_process.kill()
++ self.fail('Error from %s process while awaiting "Go":\n%s' % (
++ method_name, go+reader_process.stderr.read()))
++ reader_process.stdin.write(data_to_write)
++ signals_sent = 0
++ rlist = []
++ # We don't know when the read_and_verify_code in our child is actually
++ # executing within the read system call we want to interrupt. This
++ # loop waits for a bit before sending the first signal to increase
++ # the likelihood of that. Implementations without correct EINTR
++ # and signal handling usually fail this test.
++ while not rlist:
++ rlist, _, _ = select.select([reader_process.stderr], (), (), 0.05)
++ reader_process.send_signal(signal.SIGINT)
++ # Give the subprocess time to handle it before we loop around and
++ # send another one. On OSX the second signal happening close to
++ # immediately after the first was causing the subprocess to crash
++ # via the OS's default SIGINT handler.
++ time.sleep(0.1)
++ signals_sent += 1
++ if signals_sent > 200:
++ reader_process.kill()
++ self.fail("failed to handle signal during %s." % method_name)
++ # This assumes anything unexpected that writes to stderr will also
++ # write a newline. That is true of the traceback printing code.
++ signal_line = reader_process.stderr.readline()
++ if signal_line != '$\n':
++ reader_process.kill()
++ self.fail('Error from %s process while awaiting signal:\n%s' % (
++ method_name, signal_line+reader_process.stderr.read()))
++ # We append a newline to our input so that a readline call can
++ # end on its own before the EOF is seen.
++ stdout, stderr = reader_process.communicate(input='\n')
++ if reader_process.returncode != 0:
++ self.fail('%s() process exited rc=%d.\nSTDOUT:\n%s\nSTDERR:\n%s' % (
++ method_name, reader_process.returncode, stdout, stderr))
++
++ def test_readline(self, universal_newlines=False):
++ """file.readline must handle signals and not lose data."""
++ self._test_reading(
++ data_to_write='hello, world!',
++ read_and_verify_code=(
++ 'line = infile.readline() ;'
++ 'expected_line = "hello, world!\\n" ;'
++ 'assert line == expected_line, ('
++ '"read %r expected %r" % (line, expected_line))'
++ ),
++ method_name='readline',
++ universal_newlines=universal_newlines)
++
++ def test_readline_with_universal_newlines(self):
++ self.test_readline(universal_newlines=True)
++
++ def test_readlines(self, universal_newlines=False):
++ """file.readlines must handle signals and not lose data."""
++ self._test_reading(
++ data_to_write='hello\nworld!',
++ read_and_verify_code=(
++ 'lines = infile.readlines() ;'
++ 'expected_lines = ["hello\\n", "world!\\n"] ;'
++ 'assert lines == expected_lines, ('
++ '"readlines returned wrong data.\\n" '
++ '"got lines %r\\nexpected %r" '
++ '% (lines, expected_lines))'
++ ),
++ method_name='readlines',
++ universal_newlines=universal_newlines)
++
++ def test_readlines_with_universal_newlines(self):
++ self.test_readlines(universal_newlines=True)
++
++ def test_readall(self):
++ """Unbounded file.read() must handle signals and not lose data."""
++ self._test_reading(
++ data_to_write='hello, world!abcdefghijklm',
++ read_and_verify_code=(
++ 'data = infile.read() ;'
++ 'expected_data = "hello, world!abcdefghijklm\\n";'
++ 'assert data == expected_data, ('
++ '"read %r expected %r" % (data, expected_data))'
++ ),
++ method_name='unbounded read')
++
++ def test_readinto(self):
++ """file.readinto must handle signals and not lose data."""
++ self._test_reading(
++ data_to_write='hello, world!',
++ read_and_verify_code=(
++ 'data = bytearray(50) ;'
++ 'num_read = infile.readinto(data) ;'
++ 'expected_data = "hello, world!\\n";'
++ 'assert data[:num_read] == expected_data, ('
++ '"read %r expected %r" % (data, expected_data))'
++ ),
++ method_name='readinto')
++
++
+ class StdoutTests(unittest.TestCase):
+
+ def test_move_stdout_on_write(self):
+@@ -671,7 +823,7 @@
+ # So get rid of it no matter what.
+ try:
+ run_unittest(AutoFileTests, OtherFileTests, FileSubclassTests,
+- FileThreadingTests, StdoutTests)
++ FileThreadingTests, TestFileSignalEINTR, StdoutTests)
+ finally:
+ if os.path.exists(TESTFN):
+ os.unlink(TESTFN)
+diff -r 70274d53c1dd Lib/test/test_fileio.py
+--- a/Lib/test/test_fileio.py
++++ b/Lib/test/test_fileio.py
+@@ -130,6 +130,14 @@
+ else:
+ self.fail("Should have raised IOError")
+
++ @unittest.skipIf(os.name == 'nt', "test only works on a POSIX-like system")
++ def testOpenDirFD(self):
++ fd = os.open('.', os.O_RDONLY)
++ with self.assertRaises(IOError) as cm:
++ _FileIO(fd, 'r')
++ os.close(fd)
++ self.assertEqual(cm.exception.errno, errno.EISDIR)
++
+ #A set of functions testing that we get expected behaviour if someone has
+ #manually closed the internal file descriptor. First, a decorator:
+ def ClosedFD(func):
+@@ -421,6 +429,17 @@
+ 'IOError: [Errno 2] No such file or directory' not in out):
+ self.fail('Bad output: %r' % out)
+
++ def testUnclosedFDOnException(self):
++ class MyException(Exception): pass
++ class MyFileIO(_FileIO):
++ def __setattr__(self, name, value):
++ if name == "name":
++ raise MyException("blocked setting name")
++ return super(MyFileIO, self).__setattr__(name, value)
++ fd = os.open(__file__, os.O_RDONLY)
++ self.assertRaises(MyException, MyFileIO, fd)
++ os.close(fd) # should not raise OSError(EBADF)
++
+ def test_main():
+ # Historically, these tests have been sloppy about removing TESTFN.
+ # So get rid of it no matter what.
+diff -r 70274d53c1dd Lib/test/test_fractions.py
+--- a/Lib/test/test_fractions.py
++++ b/Lib/test/test_fractions.py
+@@ -6,6 +6,7 @@
+ import numbers
+ import operator
+ import fractions
++import sys
+ import unittest
+ from copy import copy, deepcopy
+ from cPickle import dumps, loads
+@@ -88,6 +89,9 @@
+ __hash__ = None
+
+
++class DummyFraction(fractions.Fraction):
++ """Dummy Fraction subclass for copy and deepcopy testing."""
++
+ class GcdTest(unittest.TestCase):
+
+ def testMisc(self):
+@@ -301,11 +305,15 @@
+ self.assertEqual(F(201, 200).limit_denominator(100), F(1))
+ self.assertEqual(F(201, 200).limit_denominator(101), F(102, 101))
+ self.assertEqual(F(0).limit_denominator(10000), F(0))
++ for i in (0, -1):
++ self.assertRaisesMessage(
++ ValueError, "max_denominator should be at least 1",
++ F(1).limit_denominator, i)
+
+ def testConversions(self):
+ self.assertTypedEquals(-1, math.trunc(F(-11, 10)))
+ self.assertTypedEquals(-1, int(F(-11, 10)))
+-
++ self.assertTypedEquals(1, math.trunc(F(11, 10)))
+ self.assertEqual(False, bool(F(0, 1)))
+ self.assertEqual(True, bool(F(3, 2)))
+ self.assertTypedEquals(0.1, float(F(1, 10)))
+@@ -330,6 +338,7 @@
+ self.assertEqual(F(8, 27), F(2, 3) ** F(3))
+ self.assertEqual(F(27, 8), F(2, 3) ** F(-3))
+ self.assertTypedEquals(2.0, F(4) ** F(1, 2))
++ self.assertEqual(F(1, 1), +F(1, 1))
+ # Will return 1j in 3.0:
+ self.assertRaises(ValueError, pow, F(-1), F(1, 2))
+
+@@ -394,6 +403,10 @@
+ TypeError,
+ "unsupported operand type(s) for +: 'Fraction' and 'Decimal'",
+ operator.add, F(3,11), Decimal('3.1415926'))
++ self.assertRaisesMessage(
++ TypeError,
++ "unsupported operand type(s) for +: 'Decimal' and 'Fraction'",
++ operator.add, Decimal('3.1415926'), F(3,11))
+ self.assertNotEqual(F(5, 2), Decimal('2.5'))
+
+ def testComparisons(self):
+@@ -571,9 +584,14 @@
+
+ def test_copy_deepcopy_pickle(self):
+ r = F(13, 7)
++ dr = DummyFraction(13, 7)
+ self.assertEqual(r, loads(dumps(r)))
+ self.assertEqual(id(r), id(copy(r)))
+ self.assertEqual(id(r), id(deepcopy(r)))
++ self.assertNotEqual(id(dr), id(copy(dr)))
++ self.assertNotEqual(id(dr), id(deepcopy(dr)))
++ self.assertTypedEquals(dr, copy(dr))
++ self.assertTypedEquals(dr, deepcopy(dr))
+
+ def test_slots(self):
+ # Issue 4998
+diff -r 70274d53c1dd Lib/test/test_gdb.py
+--- a/Lib/test/test_gdb.py
++++ b/Lib/test/test_gdb.py
+@@ -32,6 +32,15 @@
+ if gdbpy_version == '':
+ raise unittest.SkipTest("gdb not built with embedded python support")
+
++# Verify that "gdb" can load our custom hooks
++p = subprocess.Popen(["gdb", "--batch", cmd,
++ "--args", sys.executable],
++ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
++__, gdbpy_errors = p.communicate()
++if b"auto-loading has been declined" in gdbpy_errors:
++ msg = "gdb security settings prevent use of custom hooks: %s"
++ raise unittest.SkipTest(msg % gdbpy_errors)
++
+ def python_is_optimized():
+ cflags = sysconfig.get_config_vars()['PY_CFLAGS']
+ final_opt = ""
+diff -r 70274d53c1dd Lib/test/test_hashlib.py
+--- a/Lib/test/test_hashlib.py
++++ b/Lib/test/test_hashlib.py
+@@ -108,12 +108,8 @@
+ _algo.islower()]))
+
+ def test_unknown_hash(self):
+- try:
+- hashlib.new('spam spam spam spam spam')
+- except ValueError:
+- pass
+- else:
+- self.assertTrue(0 == "hashlib didn't reject bogus hash name")
++ self.assertRaises(ValueError, hashlib.new, 'spam spam spam spam spam')
++ self.assertRaises(TypeError, hashlib.new, 1)
+
+ def test_get_builtin_constructor(self):
+ get_builtin_constructor = hashlib.__dict__[
+@@ -132,6 +128,7 @@
+ sys.modules['_md5'] = _md5
+ else:
+ del sys.modules['_md5']
++ self.assertRaises(TypeError, get_builtin_constructor, 3)
+
+ def test_hexdigest(self):
+ for name in self.supported_hash_names:
+diff -r 70274d53c1dd Lib/test/test_htmlparser.py
+--- a/Lib/test/test_htmlparser.py
++++ b/Lib/test/test_htmlparser.py
+@@ -260,6 +260,16 @@
+ ('starttag', 'a', [('foo', None), ('=', None), ('bar', None)])
+ ]
+ self._run_check(html, expected)
++ #see issue #14538
++ html = ('<meta><meta / ><meta // ><meta / / >'
++ '<meta/><meta /><meta //><meta//>')
++ expected = [
++ ('starttag', 'meta', []), ('starttag', 'meta', []),
++ ('starttag', 'meta', []), ('starttag', 'meta', []),
++ ('startendtag', 'meta', []), ('startendtag', 'meta', []),
++ ('startendtag', 'meta', []), ('startendtag', 'meta', []),
++ ]
++ self._run_check(html, expected)
+
+ def test_declaration_junk_chars(self):
+ self._run_check("<!DOCTYPE foo $ >", [('decl', 'DOCTYPE foo $ ')])
+diff -r 70274d53c1dd Lib/test/test_httplib.py
+--- a/Lib/test/test_httplib.py
++++ b/Lib/test/test_httplib.py
+@@ -90,6 +90,34 @@
+ conn.request('POST', '/', body, headers)
+ self.assertEqual(conn._buffer.count[header.lower()], 1)
+
++ def test_content_length_0(self):
++
++ class ContentLengthChecker(list):
++ def __init__(self):
++ list.__init__(self)
++ self.content_length = None
++ def append(self, item):
++ kv = item.split(':', 1)
++ if len(kv) > 1 and kv[0].lower() == 'content-length':
++ self.content_length = kv[1].strip()
++ list.append(self, item)
++
++ # POST with empty body
++ conn = httplib.HTTPConnection('example.com')
++ conn.sock = FakeSocket(None)
++ conn._buffer = ContentLengthChecker()
++ conn.request('POST', '/', '')
++ self.assertEqual(conn._buffer.content_length, '0',
++ 'Header Content-Length not set')
++
++ # PUT request with empty body
++ conn = httplib.HTTPConnection('example.com')
++ conn.sock = FakeSocket(None)
++ conn._buffer = ContentLengthChecker()
++ conn.request('PUT', '/', '')
++ self.assertEqual(conn._buffer.content_length, '0',
++ 'Header Content-Length not set')
++
+ def test_putheader(self):
+ conn = httplib.HTTPConnection('example.com')
+ conn.sock = FakeSocket(None)
+@@ -349,6 +377,14 @@
+ resp.begin()
+ self.assertRaises(httplib.LineTooLong, resp.read)
+
++ def test_early_eof(self):
++ # Test httpresponse with no \r\n termination,
++ body = "HTTP/1.1 200 Ok"
++ sock = FakeSocket(body)
++ resp = httplib.HTTPResponse(sock)
++ resp.begin()
++ self.assertEqual(resp.read(), '')
++ self.assertTrue(resp.isclosed())
+
+ class OfflineTest(TestCase):
+ def test_responses(self):
+diff -r 70274d53c1dd Lib/test/test_httpservers.py
+--- a/Lib/test/test_httpservers.py
++++ b/Lib/test/test_httpservers.py
+@@ -4,11 +4,6 @@
+ Josip Dzolonga, and Michael Otteneder for the 2007/08 GHOP contest.
+ """
+
+-from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
+-from SimpleHTTPServer import SimpleHTTPRequestHandler
+-from CGIHTTPServer import CGIHTTPRequestHandler
+-import CGIHTTPServer
+-
+ import os
+ import sys
+ import re
+@@ -17,12 +12,17 @@
+ import urllib
+ import httplib
+ import tempfile
++import unittest
++import CGIHTTPServer
+
+-import unittest
+
++from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
++from SimpleHTTPServer import SimpleHTTPRequestHandler
++from CGIHTTPServer import CGIHTTPRequestHandler
+ from StringIO import StringIO
++from test import test_support
+
+-from test import test_support
++
+ threading = test_support.import_module('threading')
+
+
+@@ -43,7 +43,7 @@
+ self.end_headers()
+ self.wfile.write(b'<html><body>Data</body></html>\r\n')
+
+- def log_message(self, format, *args):
++ def log_message(self, fmt, *args):
+ pass
+
+
+@@ -97,9 +97,9 @@
+ self.handler = SocketlessRequestHandler()
+
+ def send_typical_request(self, message):
+- input = StringIO(message)
++ input_msg = StringIO(message)
+ output = StringIO()
+- self.handler.rfile = input
++ self.handler.rfile = input_msg
+ self.handler.wfile = output
+ self.handler.handle_one_request()
+ output.seek(0)
+@@ -296,7 +296,7 @@
+ os.chdir(self.cwd)
+ try:
+ shutil.rmtree(self.tempdir)
+- except:
++ except OSError:
+ pass
+ finally:
+ BaseTestCase.tearDown(self)
+@@ -418,41 +418,44 @@
+ finally:
+ BaseTestCase.tearDown(self)
+
+- def test_url_collapse_path_split(self):
++ def test_url_collapse_path(self):
++ # verify tail is the last portion and head is the rest on proper urls
+ test_vectors = {
+- '': ('/', ''),
++ '': '//',
+ '..': IndexError,
+ '/.//..': IndexError,
+- '/': ('/', ''),
+- '//': ('/', ''),
+- '/\\': ('/', '\\'),
+- '/.//': ('/', ''),
+- 'cgi-bin/file1.py': ('/cgi-bin', 'file1.py'),
+- '/cgi-bin/file1.py': ('/cgi-bin', 'file1.py'),
+- 'a': ('/', 'a'),
+- '/a': ('/', 'a'),
+- '//a': ('/', 'a'),
+- './a': ('/', 'a'),
+- './C:/': ('/C:', ''),
+- '/a/b': ('/a', 'b'),
+- '/a/b/': ('/a/b', ''),
+- '/a/b/c/..': ('/a/b', ''),
+- '/a/b/c/../d': ('/a/b', 'd'),
+- '/a/b/c/../d/e/../f': ('/a/b/d', 'f'),
+- '/a/b/c/../d/e/../../f': ('/a/b', 'f'),
+- '/a/b/c/../d/e/.././././..//f': ('/a/b', 'f'),
++ '/': '//',
++ '//': '//',
++ '/\\': '//\\',
++ '/.//': '//',
++ 'cgi-bin/file1.py': '/cgi-bin/file1.py',
++ '/cgi-bin/file1.py': '/cgi-bin/file1.py',
++ 'a': '//a',
++ '/a': '//a',
++ '//a': '//a',
++ './a': '//a',
++ './C:/': '/C:/',
++ '/a/b': '/a/b',
++ '/a/b/': '/a/b/',
++ '/a/b/.': '/a/b/',
++ '/a/b/c/..': '/a/b/',
++ '/a/b/c/../d': '/a/b/d',
++ '/a/b/c/../d/e/../f': '/a/b/d/f',
++ '/a/b/c/../d/e/../../f': '/a/b/f',
++ '/a/b/c/../d/e/.././././..//f': '/a/b/f',
+ '../a/b/c/../d/e/.././././..//f': IndexError,
+- '/a/b/c/../d/e/../../../f': ('/a', 'f'),
+- '/a/b/c/../d/e/../../../../f': ('/', 'f'),
++ '/a/b/c/../d/e/../../../f': '/a/f',
++ '/a/b/c/../d/e/../../../../f': '//f',
+ '/a/b/c/../d/e/../../../../../f': IndexError,
+- '/a/b/c/../d/e/../../../../f/..': ('/', ''),
++ '/a/b/c/../d/e/../../../../f/..': '//',
++ '/a/b/c/../d/e/../../../../f/../.': '//',
+ }
+ for path, expected in test_vectors.iteritems():
+ if isinstance(expected, type) and issubclass(expected, Exception):
+ self.assertRaises(expected,
+- CGIHTTPServer._url_collapse_path_split, path)
++ CGIHTTPServer._url_collapse_path, path)
+ else:
+- actual = CGIHTTPServer._url_collapse_path_split(path)
++ actual = CGIHTTPServer._url_collapse_path(path)
+ self.assertEqual(expected, actual,
+ msg='path = %r\nGot: %r\nWanted: %r' %
+ (path, actual, expected))
+diff -r 70274d53c1dd Lib/test/test_import.py
+--- a/Lib/test/test_import.py
++++ b/Lib/test/test_import.py
+@@ -7,9 +7,12 @@
+ import stat
+ import sys
+ import unittest
++import textwrap
++import shutil
++
+ from test.test_support import (unlink, TESTFN, unload, run_unittest, rmtree,
+ is_jython, check_warnings, EnvironmentVarGuard)
+-import textwrap
++from test import symlink_support
+ from test import script_helper
+
+ def remove_files(name):
+@@ -424,6 +427,13 @@
+ drive = path[0]
+ unc = "\\\\%s\\%s$"%(hn, drive)
+ unc += path[2:]
++ try:
++ os.listdir(unc)
++ except OSError as e:
++ if e.errno in (errno.EPERM, errno.EACCES):
++ # See issue #15338
++ self.skipTest("cannot access administrative share %r" % (unc,))
++ raise
+ sys.path.append(path)
+ mod = __import__("test_trailing_slash")
+ self.assertEqual(mod.testdata, 'test_trailing_slash')
+@@ -488,8 +498,58 @@
+ "implicit absolute import")
+
+
++class TestSymbolicallyLinkedPackage(unittest.TestCase):
++ package_name = 'sample'
++
++ def setUp(self):
++ if os.path.exists(self.tagged):
++ shutil.rmtree(self.tagged)
++ if os.path.exists(self.package_name):
++ symlink_support.remove_symlink(self.package_name)
++ self.orig_sys_path = sys.path[:]
++
++ # create a sample package; imagine you have a package with a tag and
++ # you want to symbolically link it from its untagged name.
++ os.mkdir(self.tagged)
++ init_file = os.path.join(self.tagged, '__init__.py')
++ open(init_file, 'w').close()
++ assert os.path.exists(init_file)
++
++ # now create a symlink to the tagged package
++ # sample -> sample-tagged
++ symlink_support.symlink(self.tagged, self.package_name)
++
++ assert os.path.isdir(self.package_name)
++ assert os.path.isfile(os.path.join(self.package_name, '__init__.py'))
++
++ @property
++ def tagged(self):
++ return self.package_name + '-tagged'
++
++ # regression test for issue6727
++ @unittest.skipUnless(
++ not hasattr(sys, 'getwindowsversion')
++ or sys.getwindowsversion() >= (6, 0),
++ "Windows Vista or later required")
++ @symlink_support.skip_unless_symlink
++ def test_symlinked_dir_importable(self):
++ # make sure sample can only be imported from the current directory.
++ sys.path[:] = ['.']
++
++ # and try to import the package
++ __import__(self.package_name)
++
++ def tearDown(self):
++ # now cleanup
++ if os.path.exists(self.package_name):
++ symlink_support.remove_symlink(self.package_name)
++ if os.path.exists(self.tagged):
++ shutil.rmtree(self.tagged)
++ sys.path[:] = self.orig_sys_path
++
+ def test_main(verbose=None):
+- run_unittest(ImportTests, PycRewritingTests, PathsTests, RelativeImportTests)
++ run_unittest(ImportTests, PycRewritingTests, PathsTests,
++ RelativeImportTests, TestSymbolicallyLinkedPackage)
+
+ if __name__ == '__main__':
+ # Test needs to be a package, so we can do relative imports.
+diff -r 70274d53c1dd Lib/test/test_io.py
+--- a/Lib/test/test_io.py
++++ b/Lib/test/test_io.py
+@@ -593,6 +593,19 @@
+ self.assertEqual(rawio.read(2), None)
+ self.assertEqual(rawio.read(2), b"")
+
++ def test_fileio_closefd(self):
++ # Issue #4841
++ with self.open(__file__, 'rb') as f1, \
++ self.open(__file__, 'rb') as f2:
++ fileio = self.FileIO(f1.fileno(), closefd=False)
++ # .__init__() must not close f1
++ fileio.__init__(f2.fileno(), closefd=False)
++ f1.readline()
++ # .close() must not close f2
++ fileio.close()
++ f2.readline()
++
++
+ class CIOTest(IOTest):
+
+ def test_IOBase_finalize(self):
+@@ -735,6 +748,20 @@
+ buf.raw = x
+
+
++class SizeofTest:
++
++ @support.cpython_only
++ def test_sizeof(self):
++ bufsize1 = 4096
++ bufsize2 = 8192
++ rawio = self.MockRawIO()
++ bufio = self.tp(rawio, buffer_size=bufsize1)
++ size = sys.getsizeof(bufio) - bufsize1
++ rawio = self.MockRawIO()
++ bufio = self.tp(rawio, buffer_size=bufsize2)
++ self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
++
++
+ class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
+ read_mode = "rb"
+
+@@ -918,7 +945,7 @@
+ "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
+
+
+-class CBufferedReaderTest(BufferedReaderTest):
++class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
+ tp = io.BufferedReader
+
+ def test_constructor(self):
+@@ -1181,7 +1208,7 @@
+ self.tp(self.MockRawIO(), 8, 12)
+
+
+-class CBufferedWriterTest(BufferedWriterTest):
++class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
+ tp = io.BufferedWriter
+
+ def test_constructor(self):
+@@ -1569,8 +1596,8 @@
+ f.flush()
+ self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
+
+-
+-class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest, BufferedRandomTest):
++class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest,
++ BufferedRandomTest, SizeofTest):
+ tp = io.BufferedRandom
+
+ def test_constructor(self):
+diff -r 70274d53c1dd Lib/test/test_logging.py
+--- a/Lib/test/test_logging.py
++++ b/Lib/test/test_logging.py
+@@ -1,6 +1,6 @@
+ #!/usr/bin/env python
+ #
+-# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
++# Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
+ #
+ # Permission to use, copy, modify, and distribute this software and its
+ # documentation for any purpose and without fee is hereby granted,
+@@ -18,7 +18,7 @@
+
+ """Test harness for the logging module. Run all tests.
+
+-Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
++Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
+ """
+
+ import logging
+@@ -31,6 +31,7 @@
+ import gc
+ import json
+ import os
++import random
+ import re
+ import select
+ import socket
+@@ -40,6 +41,7 @@
+ import tempfile
+ from test.test_support import captured_stdout, run_with_locale, run_unittest
+ import textwrap
++import time
+ import unittest
+ import warnings
+ import weakref
+@@ -1873,6 +1875,47 @@
+ self.assertTrue(c2 is c3)
+
+
++class HandlerTest(BaseTest):
++
++ @unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
++ @unittest.skipUnless(threading, 'Threading required for this test.')
++ def test_race(self):
++ # Issue #14632 refers.
++ def remove_loop(fname, tries):
++ for _ in range(tries):
++ try:
++ os.unlink(fname)
++ except OSError:
++ pass
++ time.sleep(0.004 * random.randint(0, 4))
++
++ del_count = 500
++ log_count = 500
++
++ for delay in (False, True):
++ fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
++ os.close(fd)
++ remover = threading.Thread(target=remove_loop, args=(fn, del_count))
++ remover.daemon = True
++ remover.start()
++ h = logging.handlers.WatchedFileHandler(fn, delay=delay)
++ f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
++ h.setFormatter(f)
++ try:
++ for _ in range(log_count):
++ time.sleep(0.005)
++ r = logging.makeLogRecord({'msg': 'testing' })
++ h.handle(r)
++ finally:
++ remover.join()
++ try:
++ h.close()
++ except ValueError:
++ pass
++ if os.path.exists(fn):
++ os.unlink(fn)
++
++
+ # Set the locale to the platform-dependent default. I have no idea
+ # why the test does this, but in any case we save the current locale
+ # first and restore it at the end.
+@@ -1882,7 +1925,7 @@
+ CustomLevelsAndFiltersTest, MemoryHandlerTest,
+ ConfigFileTest, SocketHandlerTest, MemoryTest,
+ EncodingTest, WarningsTest, ConfigDictTest, ManagerTest,
+- ChildLoggerTest)
++ ChildLoggerTest, HandlerTest)
+
+ if __name__ == "__main__":
+ test_main()
+diff -r 70274d53c1dd Lib/test/test_mailbox.py
+--- a/Lib/test/test_mailbox.py
++++ b/Lib/test/test_mailbox.py
+@@ -6,7 +6,9 @@
+ import email
+ import email.message
+ import re
++import shutil
+ import StringIO
++import tempfile
+ from test import test_support
+ import unittest
+ import mailbox
+@@ -19,7 +21,7 @@
+ # Silence Py3k warning
+ rfc822 = test_support.import_module('rfc822', deprecated=True)
+
+-class TestBase(unittest.TestCase):
++class TestBase:
+
+ def _check_sample(self, msg):
+ # Inspect a mailbox.Message representation of the sample message
+@@ -38,12 +40,7 @@
+ def _delete_recursively(self, target):
+ # Delete a file or delete a directory recursively
+ if os.path.isdir(target):
+- for path, dirs, files in os.walk(target, topdown=False):
+- for name in files:
+- os.remove(os.path.join(path, name))
+- for name in dirs:
+- os.rmdir(os.path.join(path, name))
+- os.rmdir(target)
++ shutil.rmtree(target)
+ elif os.path.exists(target):
+ os.remove(target)
+
+@@ -79,6 +76,18 @@
+ for i in (1, 2, 3, 4):
+ self._check_sample(self._box[keys[i]])
+
++ def test_add_file(self):
++ with tempfile.TemporaryFile('w+') as f:
++ f.write(_sample_message)
++ f.seek(0)
++ key = self._box.add(f)
++ self.assertEqual(self._box.get_string(key).split('\n'),
++ _sample_message.split('\n'))
++
++ def test_add_StringIO(self):
++ key = self._box.add(StringIO.StringIO(self._template % "0"))
++ self.assertEqual(self._box.get_string(key), self._template % "0")
++
+ def test_remove(self):
+ # Remove messages using remove()
+ self._test_remove_or_delitem(self._box.remove)
+@@ -390,6 +399,17 @@
+ # Write changes to disk
+ self._test_flush_or_close(self._box.flush, True)
+
++ def test_popitem_and_flush_twice(self):
++ # See #15036.
++ self._box.add(self._template % 0)
++ self._box.add(self._template % 1)
++ self._box.flush()
++
++ self._box.popitem()
++ self._box.flush()
++ self._box.popitem()
++ self._box.flush()
++
+ def test_lock_unlock(self):
+ # Lock and unlock the mailbox
+ self.assertFalse(os.path.exists(self._get_lock_path()))
+@@ -433,7 +453,7 @@
+ return self._path + '.lock'
+
+
+-class TestMailboxSuperclass(TestBase):
++class TestMailboxSuperclass(TestBase, unittest.TestCase):
+
+ def test_notimplemented(self):
+ # Test that all Mailbox methods raise NotImplementedException.
+@@ -468,7 +488,7 @@
+ self.assertRaises(NotImplementedError, lambda: box.close())
+
+
+-class TestMaildir(TestMailbox):
++class TestMaildir(TestMailbox, unittest.TestCase):
+
+ _factory = lambda self, path, factory=None: mailbox.Maildir(path, factory)
+
+@@ -817,7 +837,49 @@
+ self._box._refresh()
+ self.assertTrue(refreshed())
+
+-class _TestMboxMMDF(TestMailbox):
++
++class _TestSingleFile(TestMailbox):
++ '''Common tests for single-file mailboxes'''
++
++ def test_add_doesnt_rewrite(self):
++ # When only adding messages, flush() should not rewrite the
++ # mailbox file. See issue #9559.
++
++ # Inode number changes if the contents are written to another
++ # file which is then renamed over the original file. So we
++ # must check that the inode number doesn't change.
++ inode_before = os.stat(self._path).st_ino
++
++ self._box.add(self._template % 0)
++ self._box.flush()
++
++ inode_after = os.stat(self._path).st_ino
++ self.assertEqual(inode_before, inode_after)
++
++ # Make sure the message was really added
++ self._box.close()
++ self._box = self._factory(self._path)
++ self.assertEqual(len(self._box), 1)
++
++ def test_permissions_after_flush(self):
++ # See issue #5346
++
++ # Make the mailbox world writable. It's unlikely that the new
++ # mailbox file would have these permissions after flush(),
++ # because umask usually prevents it.
++ mode = os.stat(self._path).st_mode | 0o666
++ os.chmod(self._path, mode)
++
++ self._box.add(self._template % 0)
++ i = self._box.add(self._template % 1)
++ # Need to remove one message to make flush() create a new file
++ self._box.remove(i)
++ self._box.flush()
++
++ self.assertEqual(os.stat(self._path).st_mode, mode)
++
++
++class _TestMboxMMDF(_TestSingleFile):
+
+ def tearDown(self):
+ self._box.close()
+@@ -918,7 +980,7 @@
+ self._box.close()
+
+
+-class TestMbox(_TestMboxMMDF):
++class TestMbox(_TestMboxMMDF, unittest.TestCase):
+
+ _factory = lambda self, path, factory=None: mailbox.mbox(path, factory)
+
+@@ -941,12 +1003,12 @@
+ perms = st.st_mode
+ self.assertFalse((perms & 0111)) # Execute bits should all be off.
+
+-class TestMMDF(_TestMboxMMDF):
++class TestMMDF(_TestMboxMMDF, unittest.TestCase):
+
+ _factory = lambda self, path, factory=None: mailbox.MMDF(path, factory)
+
+
+-class TestMH(TestMailbox):
++class TestMH(TestMailbox, unittest.TestCase):
+
+ _factory = lambda self, path, factory=None: mailbox.MH(path, factory)
+
+@@ -1078,7 +1140,7 @@
+ return os.path.join(self._path, '.mh_sequences.lock')
+
+
+-class TestBabyl(TestMailbox):
++class TestBabyl(_TestSingleFile, unittest.TestCase):
+
+ _factory = lambda self, path, factory=None: mailbox.Babyl(path, factory)
+
+@@ -1107,7 +1169,7 @@
+ self.assertEqual(set(self._box.get_labels()), set(['blah']))
+
+
+-class TestMessage(TestBase):
++class TestMessage(TestBase, unittest.TestCase):
+
+ _factory = mailbox.Message # Overridden by subclasses to reuse tests
+
+@@ -1178,7 +1240,7 @@
+ pass
+
+
+-class TestMaildirMessage(TestMessage):
++class TestMaildirMessage(TestMessage, unittest.TestCase):
+
+ _factory = mailbox.MaildirMessage
+
+@@ -1253,7 +1315,7 @@
+ self._check_sample(msg)
+
+
+-class _TestMboxMMDFMessage(TestMessage):
++class _TestMboxMMDFMessage:
+
+ _factory = mailbox._mboxMMDFMessage
+
+@@ -1300,12 +1362,12 @@
+ r"\d{2} \d{4}", msg.get_from()))
+
+
+-class TestMboxMessage(_TestMboxMMDFMessage):
++class TestMboxMessage(_TestMboxMMDFMessage, TestMessage):
+
+ _factory = mailbox.mboxMessage
+
+
+-class TestMHMessage(TestMessage):
++class TestMHMessage(TestMessage, unittest.TestCase):
+
+ _factory = mailbox.MHMessage
+
+@@ -1336,7 +1398,7 @@
+ self.assertEqual(msg.get_sequences(), ['foobar', 'replied'])
+
+
+-class TestBabylMessage(TestMessage):
++class TestBabylMessage(TestMessage, unittest.TestCase):
+
+ _factory = mailbox.BabylMessage
+
+@@ -1391,12 +1453,12 @@
+ self.assertEqual(visible[header], msg[header])
+
+
+-class TestMMDFMessage(_TestMboxMMDFMessage):
++class TestMMDFMessage(_TestMboxMMDFMessage, TestMessage):
+
+ _factory = mailbox.MMDFMessage
+
+
+-class TestMessageConversion(TestBase):
++class TestMessageConversion(TestBase, unittest.TestCase):
+
+ def test_plain_to_x(self):
+ # Convert Message to all formats
+@@ -1719,7 +1781,7 @@
+ proxy.close()
+
+
+-class TestProxyFile(TestProxyFileBase):
++class TestProxyFile(TestProxyFileBase, unittest.TestCase):
+
+ def setUp(self):
+ self._path = test_support.TESTFN
+@@ -1768,7 +1830,7 @@
+ self._test_close(mailbox._ProxyFile(self._file))
+
+
+-class TestPartialFile(TestProxyFileBase):
++class TestPartialFile(TestProxyFileBase, unittest.TestCase):
+
+ def setUp(self):
+ self._path = test_support.TESTFN
+diff -r 70274d53c1dd Lib/test/test_memoryio.py
+--- a/Lib/test/test_memoryio.py
++++ b/Lib/test/test_memoryio.py
+@@ -638,6 +638,17 @@
+ memio.close()
+ self.assertRaises(ValueError, memio.__setstate__, (b"closed", 0, None))
+
++ check_sizeof = support.check_sizeof
++
++ @support.cpython_only
++ def test_sizeof(self):
++ basesize = support.calcobjsize(b'P2PP2P')
++ check = self.check_sizeof
++ self.assertEqual(object.__sizeof__(io.BytesIO()), basesize)
++ check(io.BytesIO(), basesize )
++ check(io.BytesIO(b'a'), basesize + 1 + 1 )
++ check(io.BytesIO(b'a' * 1000), basesize + 1000 + 1 )
++
+
+ class CStringIOTest(PyStringIOTest):
+ ioclass = io.StringIO
+diff -r 70274d53c1dd Lib/test/test_multiprocessing.py
+--- a/Lib/test/test_multiprocessing.py
++++ b/Lib/test/test_multiprocessing.py
+@@ -16,6 +16,7 @@
+ import random
+ import logging
+ import errno
++import test.script_helper
+ from test import test_support
+ from StringIO import StringIO
+ _multiprocessing = test_support.import_module('_multiprocessing')
+@@ -325,6 +326,36 @@
+ ]
+ self.assertEqual(result, expected)
+
++ @classmethod
++ def _test_sys_exit(cls, reason, testfn):
++ sys.stderr = open(testfn, 'w')
++ sys.exit(reason)
++
++ def test_sys_exit(self):
++ # See Issue 13854
++ if self.TYPE == 'threads':
++ return
++
++ testfn = test_support.TESTFN
++ self.addCleanup(test_support.unlink, testfn)
++
++ for reason, code in (([1, 2, 3], 1), ('ignore this', 0)):
++ p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
++ p.daemon = True
++ p.start()
++ p.join(5)
++ self.assertEqual(p.exitcode, code)
++
++ with open(testfn, 'r') as f:
++ self.assertEqual(f.read().rstrip(), str(reason))
++
++ for reason in (True, False, 8):
++ p = self.Process(target=sys.exit, args=(reason,))
++ p.daemon = True
++ p.start()
++ p.join(5)
++ self.assertEqual(p.exitcode, reason)
++
+ #
+ #
+ #
+@@ -1152,6 +1183,36 @@
+ join()
+ self.assertTrue(join.elapsed < 0.2)
+
++ def test_empty_iterable(self):
++ # See Issue 12157
++ p = self.Pool(1)
++
++ self.assertEqual(p.map(sqr, []), [])
++ self.assertEqual(list(p.imap(sqr, [])), [])
++ self.assertEqual(list(p.imap_unordered(sqr, [])), [])
++ self.assertEqual(p.map_async(sqr, []).get(), [])
++
++ p.close()
++ p.join()
++
++def unpickleable_result():
++ return lambda: 42
++
++class _TestPoolWorkerErrors(BaseTestCase):
++ ALLOWED_TYPES = ('processes', )
++
++ def test_unpickleable_result(self):
++ from multiprocessing.pool import MaybeEncodingError
++ p = multiprocessing.Pool(2)
++
++ # Make sure we don't lose pool processes because of encoding errors.
++ for iteration in range(20):
++ res = p.apply_async(unpickleable_result)
++ self.assertRaises(MaybeEncodingError, res.get)
++
++ p.close()
++ p.join()
++
+ class _TestPoolWorkerLifetime(BaseTestCase):
+
+ ALLOWED_TYPES = ('processes', )
+@@ -1651,6 +1712,23 @@
+ self.assertEqual(conn.recv(), 'hello')
+ p.join()
+ l.close()
++
++ def test_issue14725(self):
++ l = self.connection.Listener()
++ p = self.Process(target=self._test, args=(l.address,))
++ p.daemon = True
++ p.start()
++ time.sleep(1)
++        # On Windows the client process should by now have connected,
++        # written data and closed the pipe handle. This causes
++        # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
++ # 14725.
++ conn = l.accept()
++ self.assertEqual(conn.recv(), 'hello')
++ conn.close()
++ p.join()
++ l.close()
++
+ #
+ # Test of sending connection and socket objects between processes
+ #
+@@ -2078,7 +2156,7 @@
+ 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
+ 'Condition', 'Event', 'Value', 'Array', 'RawValue',
+ 'RawArray', 'current_process', 'active_children', 'Pipe',
+- 'connection', 'JoinableQueue'
++ 'connection', 'JoinableQueue', 'Pool'
+ )))
+
+ testcases_processes = create_test_cases(ProcessesMixin, type='processes')
+@@ -2092,7 +2170,7 @@
+ locals().update(get_attributes(manager, (
+ 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
+ 'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
+- 'Namespace', 'JoinableQueue'
++ 'Namespace', 'JoinableQueue', 'Pool'
+ )))
+
+ testcases_manager = create_test_cases(ManagerMixin, type='manager')
+@@ -2106,7 +2184,7 @@
+ 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
+ 'Condition', 'Event', 'Value', 'Array', 'current_process',
+ 'active_children', 'Pipe', 'connection', 'dict', 'list',
+- 'Namespace', 'JoinableQueue'
++ 'Namespace', 'JoinableQueue', 'Pool'
+ )))
+
+ testcases_threads = create_test_cases(ThreadsMixin, type='threads')
+@@ -2238,8 +2316,62 @@
+ flike.flush()
+ assert sio.getvalue() == 'foo'
+
++#
++# Test interaction with socket timeouts - see Issue #6056
++#
++
++class TestTimeouts(unittest.TestCase):
++ @classmethod
++ def _test_timeout(cls, child, address):
++ time.sleep(1)
++ child.send(123)
++ child.close()
++ conn = multiprocessing.connection.Client(address)
++ conn.send(456)
++ conn.close()
++
++ def test_timeout(self):
++ old_timeout = socket.getdefaulttimeout()
++ try:
++ socket.setdefaulttimeout(0.1)
++ parent, child = multiprocessing.Pipe(duplex=True)
++ l = multiprocessing.connection.Listener(family='AF_INET')
++ p = multiprocessing.Process(target=self._test_timeout,
++ args=(child, l.address))
++ p.start()
++ child.close()
++ self.assertEqual(parent.recv(), 123)
++ parent.close()
++ conn = l.accept()
++ self.assertEqual(conn.recv(), 456)
++ conn.close()
++ l.close()
++ p.join(10)
++ finally:
++ socket.setdefaulttimeout(old_timeout)
++
++#
++# Test what happens with no "if __name__ == '__main__'"
++#
++
++class TestNoForkBomb(unittest.TestCase):
++ def test_noforkbomb(self):
++ name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
++ if WIN32:
++ rc, out, err = test.script_helper.assert_python_failure(name)
++ self.assertEqual('', out.decode('ascii'))
++ self.assertIn('RuntimeError', err.decode('ascii'))
++ else:
++ rc, out, err = test.script_helper.assert_python_ok(name)
++ self.assertEqual('123', out.decode('ascii').rstrip())
++ self.assertEqual('', err.decode('ascii'))
++
++#
++#
++#
++
+ testcases_other = [OtherTest, TestInvalidHandle, TestInitializers,
+- TestStdinBadfiledescriptor]
++ TestStdinBadfiledescriptor, TestTimeouts, TestNoForkBomb]
+
+ #
+ #
+diff -r 70274d53c1dd Lib/test/test_optparse.py
+--- a/Lib/test/test_optparse.py
++++ b/Lib/test/test_optparse.py
+@@ -769,6 +769,13 @@
+ self.assertParseFail(["-test"],
+ "no such option: -e")
+
++ def test_add_option_accepts_unicode(self):
++ self.parser.add_option(u"-u", u"--unicode", action="store_true")
++ self.assertParseOK(["-u"],
++ {'a': None, 'boo': None, 'foo': None, 'unicode': True},
++ [])
++
++
+ class TestBool(BaseTest):
+ def setUp(self):
+ options = [make_option("-v",
+diff -r 70274d53c1dd Lib/test/test_parser.py
+--- a/Lib/test/test_parser.py
++++ b/Lib/test/test_parser.py
+@@ -1,7 +1,8 @@
+ import parser
+ import unittest
+ import sys
+-from test import test_support
++import struct
++from test import test_support as support
+
+ #
+ # First, we test that we can generate trees from valid source fragments,
+@@ -583,12 +584,57 @@
+ print >>sys.stderr, "Expecting 's_push: parser stack overflow' in next line"
+ self.assertRaises(MemoryError, parser.expr, e)
+
++class STObjectTestCase(unittest.TestCase):
++ """Test operations on ST objects themselves"""
++
++ check_sizeof = support.check_sizeof
++
++ @support.cpython_only
++ def test_sizeof(self):
++ def XXXROUNDUP(n):
++ if n <= 1:
++ return n
++ if n <= 128:
++ return (n + 3) & ~3
++ return 1 << (n - 1).bit_length()
++
++ basesize = support.calcobjsize('Pii')
++ nodesize = struct.calcsize('hP3iP0h')
++ def sizeofchildren(node):
++ if node is None:
++ return 0
++ res = 0
++ hasstr = len(node) > 1 and isinstance(node[-1], str)
++ if hasstr:
++ res += len(node[-1]) + 1
++ children = node[1:-1] if hasstr else node[1:]
++ if children:
++ res += XXXROUNDUP(len(children)) * nodesize
++ for child in children:
++ res += sizeofchildren(child)
++ return res
++
++ def check_st_sizeof(st):
++ self.check_sizeof(st, basesize + nodesize +
++ sizeofchildren(st.totuple()))
++
++ check_st_sizeof(parser.expr('2 + 3'))
++ check_st_sizeof(parser.expr('2 + 3 + 4'))
++ check_st_sizeof(parser.suite('x = 2 + 3'))
++ check_st_sizeof(parser.suite(''))
++ check_st_sizeof(parser.suite('# -*- coding: utf-8 -*-'))
++ check_st_sizeof(parser.expr('[' + '2,' * 1000 + ']'))
++
++
++ # XXX tests for pickling and unpickling of ST objects should go here
++
+ def test_main():
+- test_support.run_unittest(
++ support.run_unittest(
+ RoundtripLegalSyntaxTestCase,
+ IllegalSyntaxTestCase,
+ CompileTestCase,
+ ParserStackLimitTestCase,
++ STObjectTestCase,
+ )
+
+
+diff -r 70274d53c1dd Lib/test/test_pdb.py
+--- a/Lib/test/test_pdb.py
++++ b/Lib/test/test_pdb.py
+@@ -6,12 +6,69 @@
+ import os
+ import unittest
+ import subprocess
++import textwrap
+
+ from test import test_support
+ # This little helper class is essential for testing pdb under doctest.
+ from test_doctest import _FakeInput
+
+
++class PdbTestCase(unittest.TestCase):
++
++ def run_pdb(self, script, commands):
++ """Run 'script' lines with pdb and the pdb 'commands'."""
++ filename = 'main.py'
++ with open(filename, 'w') as f:
++ f.write(textwrap.dedent(script))
++ self.addCleanup(test_support.unlink, filename)
++ cmd = [sys.executable, '-m', 'pdb', filename]
++ stdout = stderr = None
++ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
++ stdin=subprocess.PIPE,
++ stderr=subprocess.STDOUT,
++ )
++ stdout, stderr = proc.communicate(commands)
++ proc.stdout.close()
++ proc.stdin.close()
++ return stdout, stderr
++
++ def test_issue13183(self):
++ script = """
++ from bar import bar
++
++ def foo():
++ bar()
++
++ def nope():
++ pass
++
++ def foobar():
++ foo()
++ nope()
++
++ foobar()
++ """
++ commands = """
++ from bar import bar
++ break bar
++ continue
++ step
++ step
++ quit
++ """
++ bar = """
++ def bar():
++ pass
++ """
++ with open('bar.py', 'w') as f:
++ f.write(textwrap.dedent(bar))
++ self.addCleanup(test_support.unlink, 'bar.py')
++ stdout, stderr = self.run_pdb(script, commands)
++ self.assertTrue(
++ any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
++            'Failed to step into the caller after a return')
++
++
+ class PdbTestInput(object):
+ """Context manager that makes testing Pdb in doctests easier."""
+
+@@ -309,7 +366,9 @@
+ def test_main():
+ from test import test_pdb
+ test_support.run_doctest(test_pdb, verbosity=True)
+- test_support.run_unittest(ModuleInitTester)
++ test_support.run_unittest(
++ PdbTestCase,
++ ModuleInitTester)
+
+ if __name__ == '__main__':
+ test_main()
+diff -r 70274d53c1dd Lib/test/test_posix.py
+--- a/Lib/test/test_posix.py
++++ b/Lib/test/test_posix.py
+@@ -9,6 +9,7 @@
+ import sys
+ import time
+ import os
++import platform
+ import pwd
+ import shutil
+ import stat
+@@ -107,7 +108,11 @@
+ # If a non-privileged user invokes it, it should fail with OSError
+ # EPERM.
+ if os.getuid() != 0:
+- name = pwd.getpwuid(posix.getuid()).pw_name
++ try:
++ name = pwd.getpwuid(posix.getuid()).pw_name
++ except KeyError:
++ # the current UID may not have a pwd entry
++ raise unittest.SkipTest("need a pwd entry")
+ try:
+ posix.initgroups(name, 13)
+ except OSError as e:
+@@ -219,6 +224,9 @@
+
+ def _test_all_chown_common(self, chown_func, first_param):
+ """Common code for chown, fchown and lchown tests."""
++ # test a successful chown call
++ chown_func(first_param, os.getuid(), os.getgid())
++
+ if os.getuid() == 0:
+ try:
+ # Many linux distros have a nfsnobody user as MAX_UID-2
+@@ -230,14 +238,16 @@
+ chown_func(first_param, ent.pw_uid, ent.pw_gid)
+ except KeyError:
+ pass
++ elif platform.system() in ('HP-UX', 'SunOS'):
++ # HP-UX and Solaris can allow a non-root user to chown() to root
++ # (issue #5113)
++ raise unittest.SkipTest("Skipping because of non-standard chown() "
++ "behavior")
+ else:
+ # non-root cannot chown to root, raises OSError
+ self.assertRaises(OSError, chown_func,
+ first_param, 0, 0)
+
+- # test a successful chown call
+- chown_func(first_param, os.getuid(), os.getgid())
+-
+ @unittest.skipUnless(hasattr(posix, 'chown'), "test needs os.chown()")
+ def test_chown(self):
+ # raise an OSError if the file does not exist
+@@ -412,8 +422,9 @@
+ def test_getgroups(self):
+ with os.popen('id -G') as idg:
+ groups = idg.read().strip()
++ ret = idg.close()
+
+- if not groups:
++ if ret != None or not groups:
+ raise unittest.SkipTest("need working 'id -G'")
+
+ # 'id -G' and 'os.getgroups()' should return the same
+diff -r 70274d53c1dd Lib/test/test_posixpath.py
+--- a/Lib/test/test_posixpath.py
++++ b/Lib/test/test_posixpath.py
+@@ -201,6 +201,7 @@
+ with test_support.EnvironmentVarGuard() as env:
+ env['HOME'] = '/'
+ self.assertEqual(posixpath.expanduser("~"), "/")
++ self.assertEqual(posixpath.expanduser("~/foo"), "/foo")
+
+ def test_normpath(self):
+ self.assertEqual(posixpath.normpath(""), ".")
+diff -r 70274d53c1dd Lib/test/test_pyclbr.py
+--- a/Lib/test/test_pyclbr.py
++++ b/Lib/test/test_pyclbr.py
+@@ -188,6 +188,11 @@
+ cm('email.parser')
+ cm('test.test_pyclbr')
+
++ def test_issue_14798(self):
++ # test ImportError is raised when the first part of a dotted name is
++ # not a package
++ self.assertRaises(ImportError, pyclbr.readmodule_ex, 'asyncore.foo')
++
+
+ def test_main():
+ run_unittest(PyclbrTest)
+diff -r 70274d53c1dd Lib/test/test_pydoc.py
+--- a/Lib/test/test_pydoc.py
++++ b/Lib/test/test_pydoc.py
+@@ -249,6 +249,17 @@
+ result, doc_loc = get_pydoc_text(xml.etree)
+ self.assertEqual(doc_loc, "", "MODULE DOCS incorrectly includes a link")
+
++ def test_non_str_name(self):
++ # issue14638
++ # Treat illegal (non-str) name like no name
++ class A:
++ __name__ = 42
++ class B:
++ pass
++ adoc = pydoc.render_doc(A())
++ bdoc = pydoc.render_doc(B())
++ self.assertEqual(adoc.replace("A", "B"), bdoc)
++
+ def test_not_here(self):
+ missing_module = "test.i_am_not_here"
+ result = run_pydoc(missing_module)
+diff -r 70274d53c1dd Lib/test/test_queue.py
+--- a/Lib/test/test_queue.py
++++ b/Lib/test/test_queue.py
+@@ -79,7 +79,7 @@
+ self.fail("trigger thread ended but event never set")
+
+
+-class BaseQueueTest(unittest.TestCase, BlockingTestMixin):
++class BaseQueueTest(BlockingTestMixin):
+ def setUp(self):
+ self.cum = 0
+ self.cumlock = threading.Lock()
+@@ -191,13 +191,13 @@
+ self.simple_queue_test(q)
+
+
+-class QueueTest(BaseQueueTest):
++class QueueTest(BaseQueueTest, unittest.TestCase):
+ type2test = Queue.Queue
+
+-class LifoQueueTest(BaseQueueTest):
++class LifoQueueTest(BaseQueueTest, unittest.TestCase):
+ type2test = Queue.LifoQueue
+
+-class PriorityQueueTest(BaseQueueTest):
++class PriorityQueueTest(BaseQueueTest, unittest.TestCase):
+ type2test = Queue.PriorityQueue
+
+
+diff -r 70274d53c1dd Lib/test/test_random.py
+--- a/Lib/test/test_random.py
++++ b/Lib/test/test_random.py
+@@ -57,6 +57,14 @@
+ self.assertRaises(TypeError, self.gen.jumpahead) # needs an arg
+ self.assertRaises(TypeError, self.gen.jumpahead, 2, 3) # too many
+
++ def test_jumpahead_produces_valid_state(self):
++ # From http://bugs.python.org/issue14591.
++ self.gen.seed(199210368)
++ self.gen.jumpahead(13550674232554645900)
++ for i in range(500):
++ val = self.gen.random()
++ self.assertLess(val, 1.0)
++
+ def test_sample(self):
+ # For the entire allowable range of 0 <= k <= N, validate that
+ # the sample is of the correct length and contains only unique items
+diff -r 70274d53c1dd Lib/test/test_re.py
+--- a/Lib/test/test_re.py
++++ b/Lib/test/test_re.py
+@@ -373,6 +373,32 @@
+ self.assertEqual(re.search(r"\d\D\w\W\s\S",
+ "1aa! a", re.UNICODE).group(0), "1aa! a")
+
++ def test_string_boundaries(self):
++ # See http://bugs.python.org/issue10713
++ self.assertEqual(re.search(r"\b(abc)\b", "abc").group(1),
++ "abc")
++ # There's a word boundary at the start of a string.
++ self.assertTrue(re.match(r"\b", "abc"))
++ # A non-empty string includes a non-boundary zero-length match.
++ self.assertTrue(re.search(r"\B", "abc"))
++ # There is no non-boundary match at the start of a string.
++ self.assertFalse(re.match(r"\B", "abc"))
++ # However, an empty string contains no word boundaries, and also no
++ # non-boundaries.
++ self.assertEqual(re.search(r"\B", ""), None)
++ # This one is questionable and different from the perlre behaviour,
++ # but describes current behavior.
++ self.assertEqual(re.search(r"\b", ""), None)
++ # A single word-character string has two boundaries, but no
++ # non-boundary gaps.
++ self.assertEqual(len(re.findall(r"\b", "a")), 2)
++ self.assertEqual(len(re.findall(r"\B", "a")), 0)
++ # If there are no words, there are no boundaries
++ self.assertEqual(len(re.findall(r"\b", " ")), 0)
++ self.assertEqual(len(re.findall(r"\b", " ")), 0)
++ # Can match around the whitespace.
++ self.assertEqual(len(re.findall(r"\B", " ")), 2)
++
+ def test_bigcharset(self):
+ self.assertEqual(re.match(u"([\u2222\u2223])",
+ u"\u2222").group(1), u"\u2222")
+@@ -757,6 +783,16 @@
+ self.assertRaises(TypeError, re.finditer, "a", {})
+ self.assertRaises(OverflowError, _sre.compile, "abc", 0, [long_overflow])
+
++ def test_compile(self):
++ # Test return value when given string and pattern as parameter
++ pattern = re.compile('random pattern')
++ self.assertIsInstance(pattern, re._pattern_type)
++ same_pattern = re.compile(pattern)
++ self.assertIsInstance(same_pattern, re._pattern_type)
++ self.assertIs(same_pattern, pattern)
++ # Test behaviour when not given a string or pattern as parameter
++ self.assertRaises(TypeError, re.compile, 0)
++
+ def run_re_tests():
+ from test.re_tests import tests, SUCCEED, FAIL, SYNTAX_ERROR
+ if verbose:
+diff -r 70274d53c1dd Lib/test/test_readline.py
+--- a/Lib/test/test_readline.py
++++ b/Lib/test/test_readline.py
+@@ -12,6 +12,10 @@
+ readline = import_module('readline')
+
+ class TestHistoryManipulation (unittest.TestCase):
++
++ @unittest.skipIf(not hasattr(readline, 'clear_history'),
++ "The history update test cannot be run because the "
++ "clear_history method is not available.")
+ def testHistoryUpdates(self):
+ readline.clear_history()
+
+diff -r 70274d53c1dd Lib/test/test_repr.py
+--- a/Lib/test/test_repr.py
++++ b/Lib/test/test_repr.py
+@@ -130,10 +130,10 @@
+ def test_file(self):
+ fp = open(unittest.__file__)
+ self.assertTrue(repr(fp).startswith(
+- "<open file '%s', mode 'r' at 0x" % unittest.__file__))
++ "<open file %r, mode 'r' at 0x" % unittest.__file__))
+ fp.close()
+ self.assertTrue(repr(fp).startswith(
+- "<closed file '%s', mode 'r' at 0x" % unittest.__file__))
++ "<closed file %r, mode 'r' at 0x" % unittest.__file__))
+
+ def test_lambda(self):
+ self.assertTrue(repr(lambda x: x).startswith(
+diff -r 70274d53c1dd Lib/test/test_shutil.py
+--- a/Lib/test/test_shutil.py
++++ b/Lib/test/test_shutil.py
+@@ -7,6 +7,7 @@
+ import stat
+ import os
+ import os.path
++import errno
+ from os.path import splitdrive
+ from distutils.spawn import find_executable, spawn
+ from shutil import (_make_tarball, _make_zipfile, make_archive,
+@@ -339,6 +340,35 @@
+ shutil.rmtree(TESTFN, ignore_errors=True)
+ shutil.rmtree(TESTFN2, ignore_errors=True)
+
++ @unittest.skipUnless(hasattr(os, 'chflags') and
++ hasattr(errno, 'EOPNOTSUPP') and
++ hasattr(errno, 'ENOTSUP'),
++ "requires os.chflags, EOPNOTSUPP & ENOTSUP")
++ def test_copystat_handles_harmless_chflags_errors(self):
++ tmpdir = self.mkdtemp()
++ file1 = os.path.join(tmpdir, 'file1')
++ file2 = os.path.join(tmpdir, 'file2')
++ self.write_file(file1, 'xxx')
++ self.write_file(file2, 'xxx')
++
++ def make_chflags_raiser(err):
++ ex = OSError()
++
++ def _chflags_raiser(path, flags):
++ ex.errno = err
++ raise ex
++ return _chflags_raiser
++ old_chflags = os.chflags
++ try:
++ for err in errno.EOPNOTSUPP, errno.ENOTSUP:
++ os.chflags = make_chflags_raiser(err)
++ shutil.copystat(file1, file2)
++            # assert other errors break it
++ os.chflags = make_chflags_raiser(errno.EOPNOTSUPP + errno.ENOTSUP)
++ self.assertRaises(OSError, shutil.copystat, file1, file2)
++ finally:
++ os.chflags = old_chflags
++
+ @unittest.skipUnless(zlib, "requires zlib")
+ def test_make_tarball(self):
+ # creating something to tar
+diff -r 70274d53c1dd Lib/test/test_socketserver.py
+--- a/Lib/test/test_socketserver.py
++++ b/Lib/test/test_socketserver.py
+@@ -8,6 +8,8 @@
+ import select
+ import signal
+ import socket
++import select
++import errno
+ import tempfile
+ import unittest
+ import SocketServer
+@@ -32,8 +34,11 @@
+ if hasattr(signal, 'alarm'):
+ signal.alarm(n)
+
++# Remember the real select() to avoid interference with mocking
++_real_select = select.select
++
+ def receive(sock, n, timeout=20):
+- r, w, x = select.select([sock], [], [], timeout)
++ r, w, x = _real_select([sock], [], [], timeout)
+ if sock in r:
+ return sock.recv(n)
+ else:
+@@ -225,6 +230,38 @@
+ SocketServer.DatagramRequestHandler,
+ self.dgram_examine)
+
++ @contextlib.contextmanager
++ def mocked_select_module(self):
++ """Mocks the select.select() call to raise EINTR for first call"""
++ old_select = select.select
++
++ class MockSelect:
++ def __init__(self):
++ self.called = 0
++
++ def __call__(self, *args):
++ self.called += 1
++ if self.called == 1:
++ # raise the exception on first call
++ raise select.error(errno.EINTR, os.strerror(errno.EINTR))
++ else:
++ # Return real select value for consecutive calls
++ return old_select(*args)
++
++ select.select = MockSelect()
++ try:
++ yield select.select
++ finally:
++ select.select = old_select
++
++ def test_InterruptServerSelectCall(self):
++ with self.mocked_select_module() as mock_select:
++ pid = self.run_server(SocketServer.TCPServer,
++ SocketServer.StreamRequestHandler,
++ self.stream_examine)
++ # Make sure select was called again:
++ self.assertGreater(mock_select.called, 1)
++
+ # Alas, on Linux (at least) recvfrom() doesn't return a meaningful
+ # client address so this cannot work:
+
+diff -r 70274d53c1dd Lib/test/test_ssl.py
+--- a/Lib/test/test_ssl.py
++++ b/Lib/test/test_ssl.py
+@@ -355,7 +355,8 @@
+ # SHA256 was added in OpenSSL 0.9.8
+ if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
+ self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
+- # NOTE: https://sha256.tbs-internet.com is another possible test host
++ self.skipTest("remote host needs SNI, only available on Python 3.2+")
++ # NOTE: https://sha2.hboeck.de is another possible test host
+ remote = ("sha256.tbs-internet.com", 443)
+ sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
+ with test_support.transient_internet("sha256.tbs-internet.com"):
+diff -r 70274d53c1dd Lib/test/test_strptime.py
+--- a/Lib/test/test_strptime.py
++++ b/Lib/test/test_strptime.py
+@@ -38,9 +38,9 @@
+ comparison = testing[self.time_tuple[tuple_position]]
+ self.assertIn(strftime_output, testing,
+ "%s: not found in tuple" % error_msg)
+- self.assertTrue(comparison == strftime_output,
+- "%s: position within tuple incorrect; %s != %s" %
+- (error_msg, comparison, strftime_output))
++ self.assertEqual(comparison, strftime_output,
++ "%s: position within tuple incorrect; %s != %s" %
++ (error_msg, comparison, strftime_output))
+
+ def test_weekday(self):
+ # Make sure that full and abbreviated weekday names are correct in
+@@ -65,8 +65,8 @@
+ "AM/PM representation not in tuple")
+ if self.time_tuple[3] < 12: position = 0
+ else: position = 1
+- self.assertTrue(strftime_output == self.LT_ins.am_pm[position],
+- "AM/PM representation in the wrong position within the tuple")
++ self.assertEqual(self.LT_ins.am_pm[position], strftime_output,
++ "AM/PM representation in the wrong position within the tuple")
+
+ def test_timezone(self):
+ # Make sure timezone is correct
+@@ -86,17 +86,14 @@
+ # output.
+ magic_date = (1999, 3, 17, 22, 44, 55, 2, 76, 0)
+ strftime_output = time.strftime("%c", magic_date)
+- self.assertTrue(strftime_output == time.strftime(self.LT_ins.LC_date_time,
+- magic_date),
+- "LC_date_time incorrect")
++ self.assertEqual(time.strftime(self.LT_ins.LC_date_time, magic_date),
++ strftime_output, "LC_date_time incorrect")
+ strftime_output = time.strftime("%x", magic_date)
+- self.assertTrue(strftime_output == time.strftime(self.LT_ins.LC_date,
+- magic_date),
+- "LC_date incorrect")
++ self.assertEqual(time.strftime(self.LT_ins.LC_date, magic_date),
++ strftime_output, "LC_date incorrect")
+ strftime_output = time.strftime("%X", magic_date)
+- self.assertTrue(strftime_output == time.strftime(self.LT_ins.LC_time,
+- magic_date),
+- "LC_time incorrect")
++ self.assertEqual(time.strftime(self.LT_ins.LC_time, magic_date),
++ strftime_output, "LC_time incorrect")
+ LT = _strptime.LocaleTime()
+ LT.am_pm = ('', '')
+ self.assertTrue(LT.LC_time, "LocaleTime's LC directives cannot handle "
+@@ -168,8 +165,8 @@
+ # Fixes bug #661354
+ test_locale = _strptime.LocaleTime()
+ test_locale.timezone = (frozenset(), frozenset())
+- self.assertTrue(_strptime.TimeRE(test_locale).pattern("%Z") == '',
+- "with timezone == ('',''), TimeRE().pattern('%Z') != ''")
++ self.assertEqual(_strptime.TimeRE(test_locale).pattern("%Z"), '',
++ "with timezone == ('',''), TimeRE().pattern('%Z') != ''")
+
+ def test_matching_with_escapes(self):
+ # Make sure a format that requires escaping of characters works
+@@ -195,7 +192,7 @@
+         # so as to not allow subpatterns to end up next to each other and
+ # "steal" characters from each other.
+ pattern = self.time_re.pattern('%j %H')
+- self.assertTrue(not re.match(pattern, "180"))
++ self.assertFalse(re.match(pattern, "180"))
+ self.assertTrue(re.match(pattern, "18 0"))
+
+
+@@ -381,6 +378,14 @@
+ need_escaping = ".^$*+?{}\[]|)("
+ self.assertTrue(_strptime._strptime_time(need_escaping, need_escaping))
+
++ def test_feb29_on_leap_year_without_year(self):
++ time.strptime("Feb 29", "%b %d")
++
++ def test_mar1_comes_after_feb29_even_when_omitting_the_year(self):
++ self.assertLess(
++ time.strptime("Feb 29", "%b %d"),
++ time.strptime("Mar 1", "%b %d"))
++
+ class Strptime12AMPMTests(unittest.TestCase):
+ """Test a _strptime regression in '%I %p' at 12 noon (12 PM)"""
+
+diff -r 70274d53c1dd Lib/test/test_struct.py
+--- a/Lib/test/test_struct.py
++++ b/Lib/test/test_struct.py
+@@ -3,7 +3,8 @@
+ import unittest
+ import struct
+ import inspect
+-from test.test_support import run_unittest, check_warnings, check_py3k_warnings
++from test import test_support as support
++from test.test_support import (check_warnings, check_py3k_warnings)
+
+ import sys
+ ISBIGENDIAN = sys.byteorder == "big"
+@@ -544,8 +545,29 @@
+ hugecount2 = '{}b{}H'.format(sys.maxsize//2, sys.maxsize//2)
+ self.assertRaises(struct.error, struct.calcsize, hugecount2)
+
++ def check_sizeof(self, format_str, number_of_codes):
++ # The size of 'PyStructObject'
++ totalsize = support.calcobjsize('5P')
++ # The size taken up by the 'formatcode' dynamic array
++ totalsize += struct.calcsize('3P') * (number_of_codes + 1)
++ support.check_sizeof(self, struct.Struct(format_str), totalsize)
++
++ @support.cpython_only
++ def test__sizeof__(self):
++ for code in integer_codes:
++ self.check_sizeof(code, 1)
++ self.check_sizeof('BHILfdspP', 9)
++ self.check_sizeof('B' * 1234, 1234)
++ self.check_sizeof('fd', 2)
++ self.check_sizeof('xxxxxxxxxxxxxx', 0)
++ self.check_sizeof('100H', 100)
++ self.check_sizeof('187s', 1)
++ self.check_sizeof('20p', 1)
++ self.check_sizeof('0s', 1)
++ self.check_sizeof('0c', 0)
++
+ def test_main():
+- run_unittest(StructTest)
++ support.run_unittest(StructTest)
+
+ if __name__ == '__main__':
+ test_main()
+diff -r 70274d53c1dd Lib/test/test_subprocess.py
+--- a/Lib/test/test_subprocess.py
++++ b/Lib/test/test_subprocess.py
+@@ -812,6 +812,27 @@
+ getattr(p, method)(*args)
+ return p
+
++ def _kill_dead_process(self, method, *args):
++ # Do not inherit file handles from the parent.
++ # It should fix failures on some platforms.
++ p = subprocess.Popen([sys.executable, "-c", """if 1:
++ import sys, time
++ sys.stdout.write('x\\n')
++ sys.stdout.flush()
++ """],
++ close_fds=True,
++ stdin=subprocess.PIPE,
++ stdout=subprocess.PIPE,
++ stderr=subprocess.PIPE)
++ # Wait for the interpreter to be completely initialized before
++ # sending any signal.
++ p.stdout.read(1)
++ # The process should end after this
++ time.sleep(1)
++ # This shouldn't raise even though the child is now dead
++ getattr(p, method)(*args)
++ p.communicate()
++
+ def test_send_signal(self):
+ p = self._kill_process('send_signal', signal.SIGINT)
+ _, stderr = p.communicate()
+@@ -830,6 +851,18 @@
+ self.assertStderrEqual(stderr, '')
+ self.assertEqual(p.wait(), -signal.SIGTERM)
+
++ def test_send_signal_dead(self):
++ # Sending a signal to a dead process
++ self._kill_dead_process('send_signal', signal.SIGINT)
++
++ def test_kill_dead(self):
++ # Killing a dead process
++ self._kill_dead_process('kill')
++
++ def test_terminate_dead(self):
++ # Terminating a dead process
++ self._kill_dead_process('terminate')
++
+ def check_close_std_fds(self, fds):
+ # Issue #9905: test that subprocess pipes still work properly with
+ # some standard fds closed
+@@ -1126,6 +1159,31 @@
+ returncode = p.wait()
+ self.assertNotEqual(returncode, 0)
+
++ def _kill_dead_process(self, method, *args):
++ p = subprocess.Popen([sys.executable, "-c", """if 1:
++ import sys, time
++ sys.stdout.write('x\\n')
++ sys.stdout.flush()
++ sys.exit(42)
++ """],
++ stdin=subprocess.PIPE,
++ stdout=subprocess.PIPE,
++ stderr=subprocess.PIPE)
++ self.addCleanup(p.stdout.close)
++ self.addCleanup(p.stderr.close)
++ self.addCleanup(p.stdin.close)
++ # Wait for the interpreter to be completely initialized before
++ # sending any signal.
++ p.stdout.read(1)
++ # The process should end after this
++ time.sleep(1)
++ # This shouldn't raise even though the child is now dead
++ getattr(p, method)(*args)
++ _, stderr = p.communicate()
++ self.assertStderrEqual(stderr, b'')
++ rc = p.wait()
++ self.assertEqual(rc, 42)
++
+ def test_send_signal(self):
+ self._kill_process('send_signal', signal.SIGTERM)
+
+@@ -1135,6 +1193,15 @@
+ def test_terminate(self):
+ self._kill_process('terminate')
+
++ def test_send_signal_dead(self):
++ self._kill_dead_process('send_signal', signal.SIGTERM)
++
++ def test_kill_dead(self):
++ self._kill_dead_process('kill')
++
++ def test_terminate_dead(self):
++ self._kill_dead_process('terminate')
++
+
+ @unittest.skipUnless(getattr(subprocess, '_has_poll', False),
+ "poll system call not supported")
+diff -r 70274d53c1dd Lib/test/test_support.py
+--- a/Lib/test/test_support.py
++++ b/Lib/test/test_support.py
+@@ -18,6 +18,8 @@
+ import UserDict
+ import re
+ import time
++import struct
++import _testcapi
+ try:
+ import thread
+ except ImportError:
+@@ -179,15 +181,79 @@
+ except KeyError:
+ pass
+
++if sys.platform.startswith("win"):
++ def _waitfor(func, pathname, waitall=False):
++        # Perform the operation
++ func(pathname)
++ # Now setup the wait loop
++ if waitall:
++ dirname = pathname
++ else:
++ dirname, name = os.path.split(pathname)
++ dirname = dirname or '.'
++ # Check for `pathname` to be removed from the filesystem.
++ # The exponential backoff of the timeout amounts to a total
++ # of ~1 second after which the deletion is probably an error
++ # anyway.
++        # Testing on an i7@4.3GHz shows that usually only 1 iteration is
++ # required when contention occurs.
++ timeout = 0.001
++ while timeout < 1.0:
++            # Note we are only testing for the existence of the file(s) in
++ # the contents of the directory regardless of any security or
++ # access rights. If we have made it this far, we have sufficient
++ # permissions to do that much using Python's equivalent of the
++ # Windows API FindFirstFile.
++ # Other Windows APIs can fail or give incorrect results when
++ # dealing with files that are pending deletion.
++ L = os.listdir(dirname)
++ if not (L if waitall else name in L):
++ return
++ # Increase the timeout and try again
++ time.sleep(timeout)
++ timeout *= 2
++ warnings.warn('tests may fail, delete still pending for ' + pathname,
++ RuntimeWarning, stacklevel=4)
++
++ def _unlink(filename):
++ _waitfor(os.unlink, filename)
++
++ def _rmdir(dirname):
++ _waitfor(os.rmdir, dirname)
++
++ def _rmtree(path):
++ def _rmtree_inner(path):
++ for name in os.listdir(path):
++ fullname = os.path.join(path, name)
++ if os.path.isdir(fullname):
++ _waitfor(_rmtree_inner, fullname, waitall=True)
++ os.rmdir(fullname)
++ else:
++ os.unlink(fullname)
++ _waitfor(_rmtree_inner, path, waitall=True)
++ _waitfor(os.rmdir, path)
++else:
++ _unlink = os.unlink
++ _rmdir = os.rmdir
++ _rmtree = shutil.rmtree
++
+ def unlink(filename):
+ try:
+- os.unlink(filename)
++ _unlink(filename)
+ except OSError:
+ pass
+
++def rmdir(dirname):
++ try:
++ _rmdir(dirname)
++ except OSError as error:
++ # The directory need not exist.
++ if error.errno != errno.ENOENT:
++ raise
++
+ def rmtree(path):
+ try:
+- shutil.rmtree(path)
++ _rmtree(path)
+ except OSError, e:
+ # Unix returns ENOENT, Windows returns ESRCH.
+ if e.errno not in (errno.ENOENT, errno.ESRCH):
+@@ -405,7 +471,7 @@
+ the CWD, an error is raised. If it's True, only a warning is raised
+ and the original CWD is used.
+ """
+- if isinstance(name, unicode):
++ if have_unicode and isinstance(name, unicode):
+ try:
+ name = name.encode(sys.getfilesystemencoding() or 'ascii')
+ except UnicodeEncodeError:
+@@ -767,6 +833,9 @@
+ ('EAI_FAIL', -4),
+ ('EAI_NONAME', -2),
+ ('EAI_NODATA', -5),
++    # Windows defines EAI_NODATA as 11001, but its getaddrinfo()
++    # implementation actually returns WSANO_DATA, i.e. 11004.
++ ('WSANO_DATA', 11004),
+ ]
+
+ denied = ResourceDenied("Resource '%s' is not available" % resource_name)
+@@ -858,6 +927,32 @@
+ gc.collect()
+
+
++_header = '2P'
++if hasattr(sys, "gettotalrefcount"):
++ _header = '2P' + _header
++_vheader = _header + 'P'
++
++def calcobjsize(fmt):
++ return struct.calcsize(_header + fmt + '0P')
++
++def calcvobjsize(fmt):
++ return struct.calcsize(_vheader + fmt + '0P')
++
++
++_TPFLAGS_HAVE_GC = 1<<14
++_TPFLAGS_HEAPTYPE = 1<<9
++
++def check_sizeof(test, o, size):
++ result = sys.getsizeof(o)
++ # add GC header size
++ if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
++ ((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
++ size += _testcapi.SIZEOF_PYGC_HEAD
++ msg = 'wrong size for %s: got %d, expected %d' \
++ % (type(o), result, size)
++ test.assertEqual(result, size, msg)
++
++
+ #=======================================================================
+ # Decorator for running a function in a different locale, correctly resetting
+ # it afterwards.
+diff -r 70274d53c1dd Lib/test/test_sys.py
+--- a/Lib/test/test_sys.py
++++ b/Lib/test/test_sys.py
+@@ -490,22 +490,8 @@
+
+ class SizeofTest(unittest.TestCase):
+
+- TPFLAGS_HAVE_GC = 1<<14
+- TPFLAGS_HEAPTYPE = 1L<<9
+-
+ def setUp(self):
+- self.c = len(struct.pack('c', ' '))
+- self.H = len(struct.pack('H', 0))
+- self.i = len(struct.pack('i', 0))
+- self.l = len(struct.pack('l', 0))
+- self.P = len(struct.pack('P', 0))
+- # due to missing size_t information from struct, it is assumed that
+- # sizeof(Py_ssize_t) = sizeof(void*)
+- self.header = 'PP'
+- self.vheader = self.header + 'P'
+- if hasattr(sys, "gettotalrefcount"):
+- self.header += '2P'
+- self.vheader += '2P'
++ self.P = struct.calcsize('P')
+ self.longdigit = sys.long_info.sizeof_digit
+ import _testcapi
+ self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
+@@ -515,128 +501,109 @@
+ self.file.close()
+ test.test_support.unlink(test.test_support.TESTFN)
+
+- def check_sizeof(self, o, size):
+- result = sys.getsizeof(o)
+- if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\
+- ((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))):
+- size += self.gc_headsize
+- msg = 'wrong size for %s: got %d, expected %d' \
+- % (type(o), result, size)
+- self.assertEqual(result, size, msg)
+-
+- def calcsize(self, fmt):
+- """Wrapper around struct.calcsize which enforces the alignment of the
+- end of a structure to the alignment requirement of pointer.
+-
+- Note: This wrapper should only be used if a pointer member is included
+- and no member with a size larger than a pointer exists.
+- """
+- return struct.calcsize(fmt + '0P')
++ check_sizeof = test.test_support.check_sizeof
+
+ def test_gc_head_size(self):
+ # Check that the gc header size is added to objects tracked by the gc.
+- h = self.header
+- size = self.calcsize
++ size = test.test_support.calcobjsize
+ gc_header_size = self.gc_headsize
+ # bool objects are not gc tracked
+- self.assertEqual(sys.getsizeof(True), size(h + 'l'))
++ self.assertEqual(sys.getsizeof(True), size('l'))
+ # but lists are
+- self.assertEqual(sys.getsizeof([]), size(h + 'P PP') + gc_header_size)
++ self.assertEqual(sys.getsizeof([]), size('P PP') + gc_header_size)
+
+ def test_default(self):
+- h = self.header
+- size = self.calcsize
+- self.assertEqual(sys.getsizeof(True, -1), size(h + 'l'))
++ size = test.test_support.calcobjsize
++ self.assertEqual(sys.getsizeof(True, -1), size('l'))
+
+ def test_objecttypes(self):
+ # check all types defined in Objects/
+- h = self.header
+- vh = self.vheader
+- size = self.calcsize
++ size = test.test_support.calcobjsize
++ vsize = test.test_support.calcvobjsize
+ check = self.check_sizeof
+ # bool
+- check(True, size(h + 'l'))
++ check(True, size('l'))
+ # buffer
+ with test.test_support.check_py3k_warnings():
+- check(buffer(''), size(h + '2P2Pil'))
++ check(buffer(''), size('2P2Pil'))
+ # builtin_function_or_method
+- check(len, size(h + '3P'))
++ check(len, size('3P'))
+ # bytearray
+ samples = ['', 'u'*100000]
+ for sample in samples:
+ x = bytearray(sample)
+- check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
++ check(x, vsize('iPP') + x.__alloc__())
+ # bytearray_iterator
+- check(iter(bytearray()), size(h + 'PP'))
++ check(iter(bytearray()), size('PP'))
+ # cell
+ def get_cell():
+ x = 42
+ def inner():
+ return x
+ return inner
+- check(get_cell().func_closure[0], size(h + 'P'))
++ check(get_cell().func_closure[0], size('P'))
+ # classobj (old-style class)
+ class class_oldstyle():
+ def method():
+ pass
+- check(class_oldstyle, size(h + '7P'))
++ check(class_oldstyle, size('7P'))
+ # instance (old-style class)
+- check(class_oldstyle(), size(h + '3P'))
++ check(class_oldstyle(), size('3P'))
+ # instancemethod (old-style class)
+- check(class_oldstyle().method, size(h + '4P'))
++ check(class_oldstyle().method, size('4P'))
+ # complex
+- check(complex(0,1), size(h + '2d'))
++ check(complex(0,1), size('2d'))
+ # code
+- check(get_cell().func_code, size(h + '4i8Pi3P'))
++ check(get_cell().func_code, size('4i8Pi3P'))
+ # BaseException
+- check(BaseException(), size(h + '3P'))
++ check(BaseException(), size('3P'))
+ # UnicodeEncodeError
+- check(UnicodeEncodeError("", u"", 0, 0, ""), size(h + '5P2PP'))
++ check(UnicodeEncodeError("", u"", 0, 0, ""), size('5P2PP'))
+ # UnicodeDecodeError
+- check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
++ check(UnicodeDecodeError("", "", 0, 0, ""), size('5P2PP'))
+ # UnicodeTranslateError
+- check(UnicodeTranslateError(u"", 0, 1, ""), size(h + '5P2PP'))
++ check(UnicodeTranslateError(u"", 0, 1, ""), size('5P2PP'))
+ # method_descriptor (descriptor object)
+- check(str.lower, size(h + '2PP'))
++ check(str.lower, size('2PP'))
+ # classmethod_descriptor (descriptor object)
+ # XXX
+ # member_descriptor (descriptor object)
+ import datetime
+- check(datetime.timedelta.days, size(h + '2PP'))
++ check(datetime.timedelta.days, size('2PP'))
+ # getset_descriptor (descriptor object)
+ import __builtin__
+- check(__builtin__.file.closed, size(h + '2PP'))
++ check(__builtin__.file.closed, size('2PP'))
+ # wrapper_descriptor (descriptor object)
+- check(int.__add__, size(h + '2P2P'))
++ check(int.__add__, size('2P2P'))
+ # dictproxy
+ class C(object): pass
+- check(C.__dict__, size(h + 'P'))
++ check(C.__dict__, size('P'))
+ # method-wrapper (descriptor object)
+- check({}.__iter__, size(h + '2P'))
++ check({}.__iter__, size('2P'))
+ # dict
+- check({}, size(h + '3P2P' + 8*'P2P'))
++ check({}, size('3P2P' + 8*'P2P'))
+ x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
+- check(x, size(h + '3P2P' + 8*'P2P') + 16*size('P2P'))
++ check(x, size('3P2P' + 8*'P2P') + 16*struct.calcsize('P2P'))
+ # dictionary-keyiterator
+- check({}.iterkeys(), size(h + 'P2PPP'))
++ check({}.iterkeys(), size('P2PPP'))
+ # dictionary-valueiterator
+- check({}.itervalues(), size(h + 'P2PPP'))
++ check({}.itervalues(), size('P2PPP'))
+ # dictionary-itemiterator
+- check({}.iteritems(), size(h + 'P2PPP'))
++ check({}.iteritems(), size('P2PPP'))
+ # ellipses
+- check(Ellipsis, size(h + ''))
++ check(Ellipsis, size(''))
+ # EncodingMap
+ import codecs, encodings.iso8859_3
+ x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
+- check(x, size(h + '32B2iB'))
++ check(x, size('32B2iB'))
+ # enumerate
+- check(enumerate([]), size(h + 'l3P'))
++ check(enumerate([]), size('l3P'))
+ # file
+- check(self.file, size(h + '4P2i4P3i3P3i'))
++ check(self.file, size('4P2i4P3i3P3i'))
+ # float
+- check(float(0), size(h + 'd'))
++ check(float(0), size('d'))
+ # sys.floatinfo
+- check(sys.float_info, size(vh) + self.P * len(sys.float_info))
++ check(sys.float_info, vsize('') + self.P * len(sys.float_info))
+ # frame
+ import inspect
+ CO_MAXBLOCKS = 20
+@@ -645,10 +612,10 @@
+ nfrees = len(x.f_code.co_freevars)
+ extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
+ ncells + nfrees - 1
+- check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
++ check(x, vsize('12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
+ # function
+ def func(): pass
+- check(func, size(h + '9P'))
++ check(func, size('9P'))
+ class c():
+ @staticmethod
+ def foo():
+@@ -657,65 +624,65 @@
+ def bar(cls):
+ pass
+ # staticmethod
+- check(foo, size(h + 'P'))
++ check(foo, size('P'))
+ # classmethod
+- check(bar, size(h + 'P'))
++ check(bar, size('P'))
+ # generator
+ def get_gen(): yield 1
+- check(get_gen(), size(h + 'Pi2P'))
++ check(get_gen(), size('Pi2P'))
+ # integer
+- check(1, size(h + 'l'))
+- check(100, size(h + 'l'))
++ check(1, size('l'))
++ check(100, size('l'))
+ # iterator
+- check(iter('abc'), size(h + 'lP'))
++ check(iter('abc'), size('lP'))
+ # callable-iterator
+ import re
+- check(re.finditer('',''), size(h + '2P'))
++ check(re.finditer('',''), size('2P'))
+ # list
+ samples = [[], [1,2,3], ['1', '2', '3']]
+ for sample in samples:
+- check(sample, size(vh + 'PP') + len(sample)*self.P)
++ check(sample, vsize('PP') + len(sample)*self.P)
+ # sortwrapper (list)
+ # XXX
+ # cmpwrapper (list)
+ # XXX
+ # listiterator (list)
+- check(iter([]), size(h + 'lP'))
++ check(iter([]), size('lP'))
+ # listreverseiterator (list)
+- check(reversed([]), size(h + 'lP'))
++ check(reversed([]), size('lP'))
+ # long
+- check(0L, size(vh))
+- check(1L, size(vh) + self.longdigit)
+- check(-1L, size(vh) + self.longdigit)
++ check(0L, vsize(''))
++ check(1L, vsize('') + self.longdigit)
++ check(-1L, vsize('') + self.longdigit)
+ PyLong_BASE = 2**sys.long_info.bits_per_digit
+- check(long(PyLong_BASE), size(vh) + 2*self.longdigit)
+- check(long(PyLong_BASE**2-1), size(vh) + 2*self.longdigit)
+- check(long(PyLong_BASE**2), size(vh) + 3*self.longdigit)
++ check(long(PyLong_BASE), vsize('') + 2*self.longdigit)
++ check(long(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
++ check(long(PyLong_BASE**2), vsize('') + 3*self.longdigit)
+ # module
+- check(unittest, size(h + 'P'))
++ check(unittest, size('P'))
+ # None
+- check(None, size(h + ''))
++ check(None, size(''))
+ # object
+- check(object(), size(h + ''))
++ check(object(), size(''))
+ # property (descriptor object)
+ class C(object):
+ def getx(self): return self.__x
+ def setx(self, value): self.__x = value
+ def delx(self): del self.__x
+ x = property(getx, setx, delx, "")
+- check(x, size(h + '4Pi'))
++ check(x, size('4Pi'))
+ # PyCObject
+ # PyCapsule
+ # XXX
+ # rangeiterator
+- check(iter(xrange(1)), size(h + '4l'))
++ check(iter(xrange(1)), size('4l'))
+ # reverse
+- check(reversed(''), size(h + 'PP'))
++ check(reversed(''), size('PP'))
+ # set
+ # frozenset
+ PySet_MINSIZE = 8
+ samples = [[], range(10), range(50)]
+- s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP')
++ s = size('3P2P' + PySet_MINSIZE*'lP' + 'lP')
+ for sample in samples:
+ minused = len(sample)
+ if minused == 0: tmp = 1
+@@ -732,23 +699,24 @@
+ check(set(sample), s + newsize*struct.calcsize('lP'))
+ check(frozenset(sample), s + newsize*struct.calcsize('lP'))
+ # setiterator
+- check(iter(set()), size(h + 'P3P'))
++ check(iter(set()), size('P3P'))
+ # slice
+- check(slice(1), size(h + '3P'))
++ check(slice(1), size('3P'))
+ # str
+- check('', struct.calcsize(vh + 'li') + 1)
+- check('abc', struct.calcsize(vh + 'li') + 1 + 3*self.c)
++ vh = test.test_support._vheader
++ check('', struct.calcsize(vh + 'lic'))
++ check('abc', struct.calcsize(vh + 'lic') + 3)
+ # super
+- check(super(int), size(h + '3P'))
++ check(super(int), size('3P'))
+ # tuple
+- check((), size(vh))
+- check((1,2,3), size(vh) + 3*self.P)
++ check((), vsize(''))
++ check((1,2,3), vsize('') + 3*self.P)
+ # tupleiterator
+- check(iter(()), size(h + 'lP'))
++ check(iter(()), size('lP'))
+ # type
+ # (PyTypeObject + PyNumberMethods + PyMappingMethods +
+ # PySequenceMethods + PyBufferProcs)
+- s = size(vh + 'P2P15Pl4PP9PP11PI') + size('41P 10P 3P 6P')
++ s = vsize('P2P15Pl4PP9PP11PI') + struct.calcsize('41P 10P 3P 6P')
+ class newstyleclass(object):
+ pass
+ check(newstyleclass, s)
+@@ -763,41 +731,40 @@
+ # we need to test for both sizes, because we don't know if the string
+ # has been cached
+ for s in samples:
+- check(s, size(h + 'PPlP') + usize * (len(s) + 1))
++ check(s, size('PPlP') + usize * (len(s) + 1))
+ # weakref
+ import weakref
+- check(weakref.ref(int), size(h + '2Pl2P'))
++ check(weakref.ref(int), size('2Pl2P'))
+ # weakproxy
+ # XXX
+ # weakcallableproxy
+- check(weakref.proxy(int), size(h + '2Pl2P'))
++ check(weakref.proxy(int), size('2Pl2P'))
+ # xrange
+- check(xrange(1), size(h + '3l'))
+- check(xrange(66000), size(h + '3l'))
++ check(xrange(1), size('3l'))
++ check(xrange(66000), size('3l'))
+
+ def test_pythontypes(self):
+ # check all types defined in Python/
+- h = self.header
+- vh = self.vheader
+- size = self.calcsize
++ size = test.test_support.calcobjsize
++ vsize = test.test_support.calcvobjsize
+ check = self.check_sizeof
+ # _ast.AST
+ import _ast
+- check(_ast.AST(), size(h + ''))
++ check(_ast.AST(), size(''))
+ # imp.NullImporter
+ import imp
+- check(imp.NullImporter(self.file.name), size(h + ''))
++ check(imp.NullImporter(self.file.name), size(''))
+ try:
+ raise TypeError
+ except TypeError:
+ tb = sys.exc_info()[2]
+ # traceback
+ if tb != None:
+- check(tb, size(h + '2P2i'))
++ check(tb, size('2P2i'))
+ # symtable entry
+ # XXX
+ # sys.flags
+- check(sys.flags, size(vh) + self.P * len(sys.flags))
++ check(sys.flags, vsize('') + self.P * len(sys.flags))
+
+
+ def test_main():
+diff -r 70274d53c1dd Lib/test/test_sys_settrace.py
+--- a/Lib/test/test_sys_settrace.py
++++ b/Lib/test/test_sys_settrace.py
+@@ -670,6 +670,14 @@
+ no_jump_to_non_integers.jump = (2, "Spam")
+ no_jump_to_non_integers.output = [True]
+
++def jump_across_with(output):
++ with open(test_support.TESTFN, "wb") as fp:
++ pass
++ with open(test_support.TESTFN, "wb") as fp:
++ pass
++jump_across_with.jump = (1, 3)
++jump_across_with.output = []
++
+ # This verifies that you can't set f_lineno via _getframe or similar
+ # trickery.
+ def no_jump_without_trace_function():
+@@ -739,6 +747,9 @@
+ self.run_test(no_jump_to_non_integers)
+ def test_19_no_jump_without_trace_function(self):
+ no_jump_without_trace_function()
++ def test_jump_across_with(self):
++ self.addCleanup(test_support.unlink, test_support.TESTFN)
++ self.run_test(jump_across_with)
+
+ def test_20_large_function(self):
+ d = {}
+diff -r 70274d53c1dd Lib/test/test_tarfile.py
+--- a/Lib/test/test_tarfile.py
++++ b/Lib/test/test_tarfile.py
+@@ -154,6 +154,9 @@
+ def test_fileobj_symlink2(self):
+ self._test_fileobj_link("./ustar/linktest2/symtype", "ustar/linktest1/regtype")
+
++ def test_issue14160(self):
++ self._test_fileobj_link("symtype2", "ustar/regtype")
++
+
+ class CommonReadTest(ReadTest):
+
+diff -r 70274d53c1dd Lib/test/test_telnetlib.py
+--- a/Lib/test/test_telnetlib.py
++++ b/Lib/test/test_telnetlib.py
+@@ -3,6 +3,7 @@
+ import time
+ import Queue
+
++import unittest
+ from unittest import TestCase
+ from test import test_support
+ threading = test_support.import_module('threading')
+@@ -135,6 +136,28 @@
+ self.assertEqual(data, want[0])
+ self.assertEqual(telnet.read_all(), 'not seen')
+
++ def test_read_until_with_poll(self):
++ """Use select.poll() to implement telnet.read_until()."""
++ want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
++ self.dataq.put(want)
++ telnet = telnetlib.Telnet(HOST, self.port)
++ if not telnet._has_poll:
++ raise unittest.SkipTest('select.poll() is required')
++ telnet._has_poll = True
++ self.dataq.join()
++ data = telnet.read_until('match')
++ self.assertEqual(data, ''.join(want[:-2]))
++
++ def test_read_until_with_select(self):
++ """Use select.select() to implement telnet.read_until()."""
++ want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
++ self.dataq.put(want)
++ telnet = telnetlib.Telnet(HOST, self.port)
++ telnet._has_poll = False
++ self.dataq.join()
++ data = telnet.read_until('match')
++ self.assertEqual(data, ''.join(want[:-2]))
++
+ def test_read_all_A(self):
+ """
+ read_all()
+@@ -357,8 +380,75 @@
+ self.assertEqual('', telnet.read_sb_data())
+ nego.sb_getter = None # break the nego => telnet cycle
+
++
++class ExpectTests(TestCase):
++ def setUp(self):
++ self.evt = threading.Event()
++ self.dataq = Queue.Queue()
++ self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
++ self.sock.settimeout(10)
++ self.port = test_support.bind_port(self.sock)
++ self.thread = threading.Thread(target=server, args=(self.evt,self.sock,
++ self.dataq))
++ self.thread.start()
++ self.evt.wait()
++
++ def tearDown(self):
++ self.thread.join()
++
++ # use a similar approach to testing timeouts as test_timeout.py
++ # these will never pass 100% but make the fuzz big enough that it is rare
++ block_long = 0.6
++ block_short = 0.3
++ def test_expect_A(self):
++ """
++ expect(expected, [timeout])
++ Read until the expected string has been seen, or a timeout is
++ hit (default is no timeout); may block.
++ """
++ want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
++ self.dataq.put(want)
++ telnet = telnetlib.Telnet(HOST, self.port)
++ self.dataq.join()
++ (_,_,data) = telnet.expect(['match'])
++ self.assertEqual(data, ''.join(want[:-2]))
++
++ def test_expect_B(self):
++ # test the timeout - it does NOT raise socket.timeout
++ want = ['hello', self.block_long, 'not seen', EOF_sigil]
++ self.dataq.put(want)
++ telnet = telnetlib.Telnet(HOST, self.port)
++ self.dataq.join()
++ (_,_,data) = telnet.expect(['not seen'], self.block_short)
++ self.assertEqual(data, want[0])
++ self.assertEqual(telnet.read_all(), 'not seen')
++
++ def test_expect_with_poll(self):
++ """Use select.poll() to implement telnet.expect()."""
++ want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
++ self.dataq.put(want)
++ telnet = telnetlib.Telnet(HOST, self.port)
++ if not telnet._has_poll:
++ raise unittest.SkipTest('select.poll() is required')
++ telnet._has_poll = True
++ self.dataq.join()
++ (_,_,data) = telnet.expect(['match'])
++ self.assertEqual(data, ''.join(want[:-2]))
++
++ def test_expect_with_select(self):
++ """Use select.select() to implement telnet.expect()."""
++ want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
++ self.dataq.put(want)
++ telnet = telnetlib.Telnet(HOST, self.port)
++ telnet._has_poll = False
++ self.dataq.join()
++ (_,_,data) = telnet.expect(['match'])
++ self.assertEqual(data, ''.join(want[:-2]))
++
++
+ def test_main(verbose=None):
+- test_support.run_unittest(GeneralTests, ReadTests, OptionTests)
++ test_support.run_unittest(GeneralTests, ReadTests, OptionTests,
++ ExpectTests)
+
+ if __name__ == '__main__':
+ test_main()
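Editor's note: the new tests exercise both I/O paths of the patched telnetlib (issue #14635) by toggling the private _has_poll flag. A rough usage sketch, assuming the patched module and a reachable host (host and port below are hypothetical):

    import telnetlib

    tn = telnetlib.Telnet('localhost', 23)   # hypothetical host/port
    tn._has_poll = False                     # force the select()-based code path
    banner = tn.read_until('login:', timeout=5)
    tn.close()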
+diff -r 70274d53c1dd Lib/test/test_thread.py
+--- a/Lib/test/test_thread.py
++++ b/Lib/test/test_thread.py
+@@ -130,6 +130,29 @@
+ time.sleep(0.01)
+ self.assertEqual(thread._count(), orig)
+
++ def test_save_exception_state_on_error(self):
++ # See issue #14474
++ def task():
++ started.release()
++ raise SyntaxError
++ def mywrite(self, *args):
++ try:
++ raise ValueError
++ except ValueError:
++ pass
++ real_write(self, *args)
++ c = thread._count()
++ started = thread.allocate_lock()
++ with test_support.captured_output("stderr") as stderr:
++ real_write = stderr.write
++ stderr.write = mywrite
++ started.acquire()
++ thread.start_new_thread(task, ())
++ started.acquire()
++ while thread._count() > c:
++ time.sleep(0.01)
++ self.assertIn("Traceback", stderr.getvalue())
++
+
+ class Barrier:
+ def __init__(self, num_threads):
+diff -r 70274d53c1dd Lib/test/test_threading.py
+--- a/Lib/test/test_threading.py
++++ b/Lib/test/test_threading.py
+@@ -2,6 +2,8 @@
+
+ import test.test_support
+ from test.test_support import verbose
++from test.script_helper import assert_python_ok
++
+ import random
+ import re
+ import sys
+@@ -414,6 +416,33 @@
+ msg=('%d references still around' %
+ sys.getrefcount(weak_raising_cyclic_object())))
+
++ @unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
++ def test_dummy_thread_after_fork(self):
++ # Issue #14308: a dummy thread in the active list doesn't mess up
++ # the after-fork mechanism.
++ code = """if 1:
++ import thread, threading, os, time
++
++ def background_thread(evt):
++ # Creates and registers the _DummyThread instance
++ threading.current_thread()
++ evt.set()
++ time.sleep(10)
++
++ evt = threading.Event()
++ thread.start_new_thread(background_thread, (evt,))
++ evt.wait()
++ assert threading.active_count() == 2, threading.active_count()
++ if os.fork() == 0:
++ assert threading.active_count() == 1, threading.active_count()
++ os._exit(0)
++ else:
++ os.wait()
++ """
++ _, out, err = assert_python_ok("-c", code)
++ self.assertEqual(out, '')
++ self.assertEqual(err, '')
++
+
+ class ThreadJoinOnShutdown(BaseTestCase):
+
+diff -r 70274d53c1dd Lib/test/test_tokenize.py
+--- a/Lib/test/test_tokenize.py
++++ b/Lib/test/test_tokenize.py
+@@ -278,6 +278,31 @@
+ OP '+' (1, 32) (1, 33)
+ STRING 'UR"ABC"' (1, 34) (1, 41)
+
++ >>> dump_tokens("b'abc' + B'abc'")
++ STRING "b'abc'" (1, 0) (1, 6)
++ OP '+' (1, 7) (1, 8)
++ STRING "B'abc'" (1, 9) (1, 15)
++ >>> dump_tokens('b"abc" + B"abc"')
++ STRING 'b"abc"' (1, 0) (1, 6)
++ OP '+' (1, 7) (1, 8)
++ STRING 'B"abc"' (1, 9) (1, 15)
++ >>> dump_tokens("br'abc' + bR'abc' + Br'abc' + BR'abc'")
++ STRING "br'abc'" (1, 0) (1, 7)
++ OP '+' (1, 8) (1, 9)
++ STRING "bR'abc'" (1, 10) (1, 17)
++ OP '+' (1, 18) (1, 19)
++ STRING "Br'abc'" (1, 20) (1, 27)
++ OP '+' (1, 28) (1, 29)
++ STRING "BR'abc'" (1, 30) (1, 37)
++ >>> dump_tokens('br"abc" + bR"abc" + Br"abc" + BR"abc"')
++ STRING 'br"abc"' (1, 0) (1, 7)
++ OP '+' (1, 8) (1, 9)
++ STRING 'bR"abc"' (1, 10) (1, 17)
++ OP '+' (1, 18) (1, 19)
++ STRING 'Br"abc"' (1, 20) (1, 27)
++ OP '+' (1, 28) (1, 29)
++ STRING 'BR"abc"' (1, 30) (1, 37)
++
+ Operators
+
+ >>> dump_tokens("def d22(a, b, c=2, d=2, *k): pass")
+diff -r 70274d53c1dd Lib/test/test_tools.py
+--- /dev/null
++++ b/Lib/test/test_tools.py
+@@ -0,0 +1,39 @@
++"""Tests for scripts in the Tools directory.
++
++This file contains regression tests for some of the scripts found in the
++Tools directory of a Python checkout or tarball, such as reindent.py.
++"""
++
++import os
++import unittest
++import sysconfig
++from test import test_support
++from test.script_helper import assert_python_ok
++
++if not sysconfig.is_python_build():
++ # XXX some installers do contain the tools, should we detect that
++ # and run the tests in that case too?
++ raise unittest.SkipTest('test irrelevant for an installed Python')
++
++srcdir = sysconfig.get_config_var('projectbase')
++basepath = os.path.join(os.getcwd(), srcdir, 'Tools')
++
++
++class ReindentTests(unittest.TestCase):
++ script = os.path.join(basepath, 'scripts', 'reindent.py')
++
++ def test_noargs(self):
++ assert_python_ok(self.script)
++
++ def test_help(self):
++ rc, out, err = assert_python_ok(self.script, '-h')
++ self.assertEqual(out, b'')
++ self.assertGreater(err, b'')
++
++
++def test_main():
++ test_support.run_unittest(ReindentTests)
++
++
++if __name__ == '__main__':
++ unittest.main()
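Editor's note: test_tools.py relies on test.script_helper.assert_python_ok, which runs a child interpreter and fails unless it exits with status 0. A short sketch of the same pattern, assuming a source checkout where Tools/scripts/reindent.py exists:

    from test.script_helper import assert_python_ok

    rc, out, err = assert_python_ok('Tools/scripts/reindent.py', '-h')
    assert rc == 0 and out == ''   # reindent.py writes its usage text to stderr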
+diff -r 70274d53c1dd Lib/test/test_urllib2.py
+--- a/Lib/test/test_urllib2.py
++++ b/Lib/test/test_urllib2.py
+@@ -1106,12 +1106,30 @@
+ self._test_basic_auth(opener, auth_handler, "Authorization",
+ realm, http_handler, password_manager,
+ "http://acme.example.com/protected",
+- "http://acme.example.com/protected",
+- )
++ "http://acme.example.com/protected"
++ )
+
+ def test_basic_auth_with_single_quoted_realm(self):
+ self.test_basic_auth(quote_char="'")
+
++ def test_basic_auth_with_unquoted_realm(self):
++ opener = OpenerDirector()
++ password_manager = MockPasswordManager()
++ auth_handler = urllib2.HTTPBasicAuthHandler(password_manager)
++ realm = "ACME Widget Store"
++ http_handler = MockHTTPHandler(
++ 401, 'WWW-Authenticate: Basic realm=%s\r\n\r\n' % realm)
++ opener.add_handler(auth_handler)
++ opener.add_handler(http_handler)
++ msg = "Basic Auth Realm was unquoted"
++ with test_support.check_warnings((msg, UserWarning)):
++ self._test_basic_auth(opener, auth_handler, "Authorization",
++ realm, http_handler, password_manager,
++ "http://acme.example.com/protected",
++ "http://acme.example.com/protected"
++ )
++
++
+ def test_proxy_basic_auth(self):
+ opener = OpenerDirector()
+ ph = urllib2.ProxyHandler(dict(http="proxy.example.com:3128"))
+diff -r 70274d53c1dd Lib/test/test_urlparse.py
+--- a/Lib/test/test_urlparse.py
++++ b/Lib/test/test_urlparse.py
+@@ -437,6 +437,18 @@
+ self.assertEqual(p.port, 80)
+ self.assertEqual(p.geturl(), url)
+
++ # Verify an illegal port of value greater than 65535 is set as None
++ url = "http://www.python.org:65536"
++ p = urlparse.urlsplit(url)
++ self.assertEqual(p.port, None)
++
++ def test_issue14072(self):
++ p1 = urlparse.urlsplit('tel:+31-641044153')
++ self.assertEqual(p1.scheme, 'tel')
++ self.assertEqual(p1.path, '+31-641044153')
++ p2 = urlparse.urlsplit('tel:+31641044153')
++ self.assertEqual(p2.scheme, 'tel')
++ self.assertEqual(p2.path, '+31641044153')
+
+ def test_attributes_bad_port(self):
+ """Check handling of non-integer ports."""
+@@ -493,6 +505,10 @@
+ ('s3','foo.com','/stuff','','',''))
+ self.assertEqual(urlparse.urlparse("x-newscheme://foo.com/stuff"),
+ ('x-newscheme','foo.com','/stuff','','',''))
++ self.assertEqual(urlparse.urlparse("x-newscheme://foo.com/stuff?query#fragment"),
++ ('x-newscheme','foo.com','/stuff','','query','fragment'))
++ self.assertEqual(urlparse.urlparse("x-newscheme://foo.com/stuff?query"),
++ ('x-newscheme','foo.com','/stuff','','query',''))
+
+ def test_withoutscheme(self):
+ # Test urlparse without scheme
+diff -r 70274d53c1dd Lib/test/test_weakref.py
+--- a/Lib/test/test_weakref.py
++++ b/Lib/test/test_weakref.py
+@@ -815,11 +815,71 @@
+ def __repr__(self):
+ return "<Object %r>" % self.arg
+
++class RefCycle:
++ def __init__(self):
++ self.cycle = self
++
+
+ class MappingTestCase(TestBase):
+
+ COUNT = 10
+
++ def check_len_cycles(self, dict_type, cons):
++ N = 20
++ items = [RefCycle() for i in range(N)]
++ dct = dict_type(cons(o) for o in items)
++ # Keep an iterator alive
++ it = dct.iteritems()
++ try:
++ next(it)
++ except StopIteration:
++ pass
++ del items
++ gc.collect()
++ n1 = len(dct)
++ del it
++ gc.collect()
++ n2 = len(dct)
++ # one item may be kept alive inside the iterator
++ self.assertIn(n1, (0, 1))
++ self.assertEqual(n2, 0)
++
++ def test_weak_keyed_len_cycles(self):
++ self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1))
++
++ def test_weak_valued_len_cycles(self):
++ self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k))
++
++ def check_len_race(self, dict_type, cons):
++ # Extended sanity checks for len() in the face of cyclic collection
++ self.addCleanup(gc.set_threshold, *gc.get_threshold())
++ for th in range(1, 100):
++ N = 20
++ gc.collect(0)
++ gc.set_threshold(th, th, th)
++ items = [RefCycle() for i in range(N)]
++ dct = dict_type(cons(o) for o in items)
++ del items
++ # All items will be collected at next garbage collection pass
++ it = dct.iteritems()
++ try:
++ next(it)
++ except StopIteration:
++ pass
++ n1 = len(dct)
++ del it
++ n2 = len(dct)
++ self.assertGreaterEqual(n1, 0)
++ self.assertLessEqual(n1, N)
++ self.assertGreaterEqual(n2, 0)
++ self.assertLessEqual(n2, n1)
++
++ def test_weak_keyed_len_race(self):
++ self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
++
++ def test_weak_valued_len_race(self):
++ self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
++
+ def test_weak_values(self):
+ #
+ # This exercises d.copy(), d.items(), d[], del d[], len(d).
+diff -r 70274d53c1dd Lib/test/test_weakset.py
+--- a/Lib/test/test_weakset.py
++++ b/Lib/test/test_weakset.py
+@@ -30,6 +30,10 @@
+ def __hash__(self):
+ return hash((SomeClass, self.value))
+
++class RefCycle(object):
++ def __init__(self):
++ self.cycle = self
++
+ class TestWeakSet(unittest.TestCase):
+
+ def setUp(self):
+@@ -37,6 +41,12 @@
+ self.items = [SomeClass(c) for c in ('a', 'b', 'c')]
+ self.items2 = [SomeClass(c) for c in ('x', 'y', 'z')]
+ self.letters = [SomeClass(c) for c in string.ascii_letters]
++ self.ab_items = [SomeClass(c) for c in 'ab']
++ self.abcde_items = [SomeClass(c) for c in 'abcde']
++ self.def_items = [SomeClass(c) for c in 'def']
++ self.ab_weakset = WeakSet(self.ab_items)
++ self.abcde_weakset = WeakSet(self.abcde_items)
++ self.def_weakset = WeakSet(self.def_items)
+ self.s = WeakSet(self.items)
+ self.d = dict.fromkeys(self.items)
+ self.obj = SomeClass('F')
+@@ -79,6 +89,11 @@
+ x = WeakSet(self.items + self.items2)
+ c = C(self.items2)
+ self.assertEqual(self.s.union(c), x)
++ del c
++ self.assertEqual(len(u), len(self.items) + len(self.items2))
++ self.items2.pop()
++ gc.collect()
++ self.assertEqual(len(u), len(self.items) + len(self.items2))
+
+ def test_or(self):
+ i = self.s.union(self.items2)
+@@ -86,14 +101,19 @@
+ self.assertEqual(self.s | frozenset(self.items2), i)
+
+ def test_intersection(self):
+- i = self.s.intersection(self.items2)
++ s = WeakSet(self.letters)
++ i = s.intersection(self.items2)
+ for c in self.letters:
+- self.assertEqual(c in i, c in self.d and c in self.items2)
+- self.assertEqual(self.s, WeakSet(self.items))
++ self.assertEqual(c in i, c in self.items2 and c in self.letters)
++ self.assertEqual(s, WeakSet(self.letters))
+ self.assertEqual(type(i), WeakSet)
+ for C in set, frozenset, dict.fromkeys, list, tuple:
+ x = WeakSet([])
+- self.assertEqual(self.s.intersection(C(self.items2)), x)
++ self.assertEqual(i.intersection(C(self.items)), x)
++ self.assertEqual(len(i), len(self.items2))
++ self.items2.pop()
++ gc.collect()
++ self.assertEqual(len(i), len(self.items2))
+
+ def test_isdisjoint(self):
+ self.assertTrue(self.s.isdisjoint(WeakSet(self.items2)))
+@@ -124,6 +144,10 @@
+ self.assertEqual(self.s, WeakSet(self.items))
+ self.assertEqual(type(i), WeakSet)
+ self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
++ self.assertEqual(len(i), len(self.items) + len(self.items2))
++ self.items2.pop()
++ gc.collect()
++ self.assertEqual(len(i), len(self.items) + len(self.items2))
+
+ def test_xor(self):
+ i = self.s.symmetric_difference(self.items2)
+@@ -131,22 +155,28 @@
+ self.assertEqual(self.s ^ frozenset(self.items2), i)
+
+ def test_sub_and_super(self):
+- pl, ql, rl = map(lambda s: [SomeClass(c) for c in s], ['ab', 'abcde', 'def'])
+- p, q, r = map(WeakSet, (pl, ql, rl))
+- self.assertTrue(p < q)
+- self.assertTrue(p <= q)
+- self.assertTrue(q <= q)
+- self.assertTrue(q > p)
+- self.assertTrue(q >= p)
+- self.assertFalse(q < r)
+- self.assertFalse(q <= r)
+- self.assertFalse(q > r)
+- self.assertFalse(q >= r)
++ self.assertTrue(self.ab_weakset <= self.abcde_weakset)
++ self.assertTrue(self.abcde_weakset <= self.abcde_weakset)
++ self.assertTrue(self.abcde_weakset >= self.ab_weakset)
++ self.assertFalse(self.abcde_weakset <= self.def_weakset)
++ self.assertFalse(self.abcde_weakset >= self.def_weakset)
+ self.assertTrue(set('a').issubset('abc'))
+ self.assertTrue(set('abc').issuperset('a'))
+ self.assertFalse(set('a').issubset('cbs'))
+ self.assertFalse(set('cbs').issuperset('a'))
+
++ def test_lt(self):
++ self.assertTrue(self.ab_weakset < self.abcde_weakset)
++ self.assertFalse(self.abcde_weakset < self.def_weakset)
++ self.assertFalse(self.ab_weakset < self.ab_weakset)
++ self.assertFalse(WeakSet() < WeakSet())
++
++ def test_gt(self):
++ self.assertTrue(self.abcde_weakset > self.ab_weakset)
++ self.assertFalse(self.abcde_weakset > self.def_weakset)
++ self.assertFalse(self.ab_weakset > self.ab_weakset)
++ self.assertFalse(WeakSet() > WeakSet())
++
+ def test_gc(self):
+ # Create a nest of cycles to exercise overall ref count check
+ s = WeakSet(Foo() for i in range(1000))
+@@ -369,6 +399,49 @@
+ s.clear()
+ self.assertEqual(len(s), 0)
+
++ def test_len_cycles(self):
++ N = 20
++ items = [RefCycle() for i in range(N)]
++ s = WeakSet(items)
++ del items
++ it = iter(s)
++ try:
++ next(it)
++ except StopIteration:
++ pass
++ gc.collect()
++ n1 = len(s)
++ del it
++ gc.collect()
++ n2 = len(s)
++ # one item may be kept alive inside the iterator
++ self.assertIn(n1, (0, 1))
++ self.assertEqual(n2, 0)
++
++ def test_len_race(self):
++ # Extended sanity checks for len() in the face of cyclic collection
++ self.addCleanup(gc.set_threshold, *gc.get_threshold())
++ for th in range(1, 100):
++ N = 20
++ gc.collect(0)
++ gc.set_threshold(th, th, th)
++ items = [RefCycle() for i in range(N)]
++ s = WeakSet(items)
++ del items
++ # All items will be collected at next garbage collection pass
++ it = iter(s)
++ try:
++ next(it)
++ except StopIteration:
++ pass
++ n1 = len(s)
++ del it
++ n2 = len(s)
++ self.assertGreaterEqual(n1, 0)
++ self.assertLessEqual(n1, N)
++ self.assertGreaterEqual(n2, 0)
++ self.assertLessEqual(n2, n1)
++
+
+ def test_main(verbose=None):
+ test_support.run_unittest(TestWeakSet)
+diff -r 70274d53c1dd Lib/test/test_winreg.py
+--- a/Lib/test/test_winreg.py
++++ b/Lib/test/test_winreg.py
+@@ -1,7 +1,7 @@
+ # Test the windows specific win32reg module.
+ # Only win32reg functions not hit here: FlushKey, LoadKey and SaveKey
+
+-import os, sys
++import os, sys, errno
+ import unittest
+ from test import test_support
+ threading = test_support.import_module("threading")
+@@ -283,7 +283,13 @@
+ def test_dynamic_key(self):
+ # Issue2810, when the value is dynamically generated, these
+ # throw "WindowsError: More data is available" in 2.6 and 3.1
+- EnumValue(HKEY_PERFORMANCE_DATA, 0)
++ try:
++ EnumValue(HKEY_PERFORMANCE_DATA, 0)
++ except OSError as e:
++ if e.errno in (errno.EPERM, errno.EACCES):
++ self.skipTest("access denied to registry key "
++ "(are you running in a non-interactive session?)")
++ raise
+ QueryValueEx(HKEY_PERFORMANCE_DATA, None)
+
+ # Reflection requires XP x64/Vista at a minimum. XP doesn't have this stuff
+diff -r 70274d53c1dd Lib/test/test_zipfile.py
+--- a/Lib/test/test_zipfile.py
++++ b/Lib/test/test_zipfile.py
+@@ -908,6 +908,22 @@
+ with zipfile.ZipFile(TESTFN, mode="r") as zipf:
+ self.assertEqual(zipf.comment, comment2)
+
++ def test_change_comment_in_empty_archive(self):
++ with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as zipf:
++ self.assertFalse(zipf.filelist)
++ zipf.comment = b"this is a comment"
++ with zipfile.ZipFile(TESTFN, "r") as zipf:
++ self.assertEqual(zipf.comment, b"this is a comment")
++
++ def test_change_comment_in_nonempty_archive(self):
++ with zipfile.ZipFile(TESTFN, "w", zipfile.ZIP_STORED) as zipf:
++ zipf.writestr("foo.txt", "O, for a Muse of Fire!")
++ with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as zipf:
++ self.assertTrue(zipf.filelist)
++ zipf.comment = b"this is a comment"
++ with zipfile.ZipFile(TESTFN, "r") as zipf:
++ self.assertEqual(zipf.comment, b"this is a comment")
++
+ def check_testzip_with_bad_crc(self, compression):
+ """Tests that files with bad CRCs return their name from testzip."""
+ zipdata = self.zips_with_bad_crc[compression]
+diff -r 70274d53c1dd Lib/test/testtar.tar
+Binary file Lib/test/testtar.tar has changed
+diff -r 70274d53c1dd Lib/textwrap.py
+--- a/Lib/textwrap.py
++++ b/Lib/textwrap.py
+@@ -9,6 +9,14 @@
+
+ import string, re
+
++try:
++ _unicode = unicode
++except NameError:
++ # If Python is built without Unicode support, the unicode type
++ # will not exist. Fake one.
++ class _unicode(object):
++ pass
++
+ # Do the right thing with boolean values for all known Python versions
+ # (so this module can be copied to projects that don't depend on Python
+ # 2.3, e.g. Optik and Docutils) by uncommenting the block of code below.
+@@ -147,7 +155,7 @@
+ if self.replace_whitespace:
+ if isinstance(text, str):
+ text = text.translate(self.whitespace_trans)
+- elif isinstance(text, unicode):
++ elif isinstance(text, _unicode):
+ text = text.translate(self.unicode_whitespace_trans)
+ return text
+
+@@ -167,7 +175,7 @@
+ 'use', ' ', 'the', ' ', '-b', ' ', option!'
+ otherwise.
+ """
+- if isinstance(text, unicode):
++ if isinstance(text, _unicode):
+ if self.break_on_hyphens:
+ pat = self.wordsep_re_uni
+ else:
+diff -r 70274d53c1dd Lib/threading.py
+--- a/Lib/threading.py
++++ b/Lib/threading.py
+@@ -10,6 +10,7 @@
+
+ import warnings
+
++from collections import deque as _deque
+ from time import time as _time, sleep as _sleep
+ from traceback import format_exc as _format_exc
+
+@@ -605,6 +606,10 @@
+ pass
+
+ def __stop(self):
++ # DummyThreads delete self.__block, but they have no waiters to
++ # notify anyway (join() is forbidden on them).
++ if not hasattr(self, '_Thread__block'):
++ return
+ self.__block.acquire()
+ self.__stopped = True
+ self.__block.notify_all()
+@@ -909,7 +914,7 @@
+ self.rc = Condition(self.mon)
+ self.wc = Condition(self.mon)
+ self.limit = limit
+- self.queue = deque()
++ self.queue = _deque()
+
+ def put(self, item):
+ self.mon.acquire()
+diff -r 70274d53c1dd Lib/token.py
+--- a/Lib/token.py
++++ b/Lib/token.py
+@@ -7,7 +7,7 @@
+ # To update the symbols in this file, 'cd' to the top directory of
+ # the python source tree after building the interpreter and run:
+ #
+-# python Lib/token.py
++# ./python Lib/token.py
+
+ #--start constants--
+ ENDMARKER = 0
+diff -r 70274d53c1dd Lib/tokenize.py
+--- a/Lib/tokenize.py
++++ b/Lib/tokenize.py
+@@ -70,10 +70,10 @@
+ Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
+ # Tail end of """ string.
+ Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
+-Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
++Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""')
+ # Single-line ' or " string.
+-String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+- r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
++String = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
++ r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+
+ # Because of leftmost-then-longest match semantics, be sure to put the
+ # longest operators first (e.g., if = came before ==, == would get
+@@ -91,9 +91,9 @@
+ Token = Ignore + PlainToken
+
+ # First (or only) line of ' or " string.
+-ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
++ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+ group("'", r'\\\r?\n'),
+- r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
++ r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+ group('"', r'\\\r?\n'))
+ PseudoExtras = group(r'\\\r?\n', Comment, Triple)
+ PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
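Editor's note: the widened prefix patterns above make b/B and br/bR/Br/BR literals come out as single STRING tokens (issue #15054). A quick sketch of how to observe that, assuming the patched Lib/tokenize.py:

    import StringIO
    import token
    import tokenize

    src = "b'abc' + BR\"abc\"\n"
    for tok in tokenize.generate_tokens(StringIO.StringIO(src).readline):
        print token.tok_name[tok[0]], repr(tok[1])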
+diff -r 70274d53c1dd Lib/unittest/case.py
+--- a/Lib/unittest/case.py
++++ b/Lib/unittest/case.py
+@@ -6,6 +6,7 @@
+ import difflib
+ import pprint
+ import re
++import types
+ import warnings
+
+ from . import result
+@@ -55,7 +56,7 @@
+ Unconditionally skip a test.
+ """
+ def decorator(test_item):
+- if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
++ if not isinstance(test_item, (type, types.ClassType)):
+ @functools.wraps(test_item)
+ def skip_wrapper(*args, **kwargs):
+ raise SkipTest(reason)
+@@ -201,7 +202,11 @@
+ self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
+ self.addTypeEqualityFunc(set, 'assertSetEqual')
+ self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
+- self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual')
++ try:
++ self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual')
++ except NameError:
++ # No unicode support in this build
++ pass
+
+ def addTypeEqualityFunc(self, typeobj, function):
+ """Add a type specific assertEqual style function to compare a type.
+@@ -871,7 +876,7 @@
+ - [0, 1, 1] and [1, 0, 1] compare equal.
+ - [0, 0, 1] and [0, 1] compare unequal.
+ """
+- first_seq, second_seq = list(actual_seq), list(expected_seq)
++ first_seq, second_seq = list(expected_seq), list(actual_seq)
+ with warnings.catch_warnings():
+ if sys.py3kwarning:
+ # Silence Py3k warning raised during the sorting
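Editor's note: relaxing the isinstance check above lets @unittest.skip decorate plain mixin classes, including old-style ones, instead of only TestCase subclasses (issue #14664). A small sketch mirroring the new tests in test_skipping.py:

    import unittest

    @unittest.skip("not ready")
    class Mixin:                       # old-style class, not a TestCase
        def test_one(self):
            pass

    class MyTests(Mixin, unittest.TestCase):
        pass                           # test_one is reported as skipped, not run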
+diff -r 70274d53c1dd Lib/unittest/test/test_skipping.py
+--- a/Lib/unittest/test/test_skipping.py
++++ b/Lib/unittest/test/test_skipping.py
+@@ -66,6 +66,36 @@
+ self.assertEqual(result.skipped, [(test, "testing")])
+ self.assertEqual(record, [])
+
++ def test_skip_non_unittest_class_old_style(self):
++ @unittest.skip("testing")
++ class Mixin:
++ def test_1(self):
++ record.append(1)
++ class Foo(Mixin, unittest.TestCase):
++ pass
++ record = []
++ result = unittest.TestResult()
++ test = Foo("test_1")
++ suite = unittest.TestSuite([test])
++ suite.run(result)
++ self.assertEqual(result.skipped, [(test, "testing")])
++ self.assertEqual(record, [])
++
++ def test_skip_non_unittest_class_new_style(self):
++ @unittest.skip("testing")
++ class Mixin(object):
++ def test_1(self):
++ record.append(1)
++ class Foo(Mixin, unittest.TestCase):
++ pass
++ record = []
++ result = unittest.TestResult()
++ test = Foo("test_1")
++ suite = unittest.TestSuite([test])
++ suite.run(result)
++ self.assertEqual(result.skipped, [(test, "testing")])
++ self.assertEqual(record, [])
++
+ def test_expected_failure(self):
+ class Foo(unittest.TestCase):
+ @unittest.expectedFailure
+diff -r 70274d53c1dd Lib/urllib.py
+--- a/Lib/urllib.py
++++ b/Lib/urllib.py
+@@ -980,11 +980,11 @@
+ self.hookargs = hookargs
+
+ def close(self):
+- addbase.close(self)
+ if self.closehook:
+ self.closehook(*self.hookargs)
+ self.closehook = None
+ self.hookargs = None
++ addbase.close(self)
+
+ class addinfo(addbase):
+ """class to add an info() method to an open file."""
+diff -r 70274d53c1dd Lib/urllib2.py
+--- a/Lib/urllib2.py
++++ b/Lib/urllib2.py
+@@ -102,6 +102,7 @@
+ import time
+ import urlparse
+ import bisect
++import warnings
+
+ try:
+ from cStringIO import StringIO
+@@ -109,7 +110,7 @@
+ from StringIO import StringIO
+
+ from urllib import (unwrap, unquote, splittype, splithost, quote,
+- addinfourl, splitport, splittag,
++ addinfourl, splitport, splittag, toBytes,
+ splitattr, ftpwrapper, splituser, splitpasswd, splitvalue)
+
+ # support for FileHandler, proxies via environment variables
+@@ -828,7 +829,7 @@
+ # allow for double- and single-quoted realm values
+ # (single quotes are a violation of the RFC, but appear in the wild)
+ rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
+- 'realm=(["\'])(.*?)\\2', re.I)
++ 'realm=(["\']?)([^"\']*)\\2', re.I)
+
+ # XXX could pre-emptively send auth info already accepted (RFC 2617,
+ # end of section 2, and section 1.2 immediately after "credentials"
+@@ -861,6 +862,9 @@
+ mo = AbstractBasicAuthHandler.rx.search(authreq)
+ if mo:
+ scheme, quote, realm = mo.groups()
++ if quote not in ['"', "'"]:
++ warnings.warn("Basic Auth Realm was unquoted",
++ UserWarning, 2)
+ if scheme.lower() == 'basic':
+ response = self.retry_http_basic_auth(host, req, realm)
+ if response and response.code != 401:
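Editor's note: the relaxed regex above accepts unquoted realm values while warning about them (issue #12541). A quick check of the pattern, copied from the patched AbstractBasicAuthHandler.rx (a sketch, not part of the patch):

    import re

    rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
                    'realm=(["\']?)([^"\']*)\\2', re.I)
    scheme, quote, realm = rx.search('Basic realm=ACME Widget Store').groups()
    assert (scheme, quote, realm) == ('Basic', '', 'ACME Widget Store')
    # quote == '' marks the unquoted case that now triggers the UserWarning.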
+diff -r 70274d53c1dd Lib/urlparse.py
+--- a/Lib/urlparse.py
++++ b/Lib/urlparse.py
+@@ -40,16 +40,9 @@
+ 'imap', 'wais', 'file', 'mms', 'https', 'shttp',
+ 'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
+ 'svn', 'svn+ssh', 'sftp','nfs','git', 'git+ssh']
+-non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
+- 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
+ uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
+ 'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
+ 'mms', '', 'sftp']
+-uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
+- 'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
+-uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
+- 'nntp', 'wais', 'https', 'shttp', 'snews',
+- 'file', 'prospero', '']
+
+ # Characters valid in scheme names
+ scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
+@@ -104,9 +97,11 @@
+ netloc = self.netloc.split('@')[-1].split(']')[-1]
+ if ':' in netloc:
+ port = netloc.split(':')[1]
+- return int(port, 10)
+- else:
+- return None
++ port = int(port, 10)
++ # verify legal port
++ if (0 <= port <= 65535):
++ return port
++ return None
+
+ from collections import namedtuple
+
+@@ -192,21 +187,21 @@
+ if c not in scheme_chars:
+ break
+ else:
+- try:
+- # make sure "url" is not actually a port number (in which case
+- # "scheme" is really part of the path
+- _testportnum = int(url[i+1:])
+- except ValueError:
+- scheme, url = url[:i].lower(), url[i+1:]
++ # make sure "url" is not actually a port number (in which case
++ # "scheme" is really part of the path)
++ rest = url[i+1:]
++ if not rest or any(c not in '0123456789' for c in rest):
++ # not a port number
++ scheme, url = url[:i].lower(), rest
+
+ if url[:2] == '//':
+ netloc, url = _splitnetloc(url, 2)
+ if (('[' in netloc and ']' not in netloc) or
+ (']' in netloc and '[' not in netloc)):
+ raise ValueError("Invalid IPv6 URL")
+- if allow_fragments and scheme in uses_fragment and '#' in url:
++ if allow_fragments and '#' in url:
+ url, fragment = url.split('#', 1)
+- if scheme in uses_query and '?' in url:
++ if '?' in url:
+ url, query = url.split('?', 1)
+ v = SplitResult(scheme, netloc, url, query, fragment)
+ _parse_cache[key] = v
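Editor's note: taken together, the urlparse changes above validate the port range and parse query/fragment for any scheme (issues #14036, #9374, #14072). A sketch of the resulting behaviour, with values taken from the new tests:

    import urlparse

    assert urlparse.urlsplit('http://www.python.org:65536').port is None
    p = urlparse.urlsplit('x-newscheme://foo.com/stuff?query#fragment')
    assert (p.query, p.fragment) == ('query', 'fragment')
    assert urlparse.urlsplit('tel:+31-641044153').path == '+31-641044153'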
+diff -r 70274d53c1dd Lib/zipfile.py
+--- a/Lib/zipfile.py
++++ b/Lib/zipfile.py
+@@ -651,7 +651,7 @@
+
+
+
+-class ZipFile:
++class ZipFile(object):
+ """ Class with methods to open, read, write, close, list zip files.
+
+ z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
+@@ -690,7 +690,7 @@
+ self.compression = compression # Method of compression
+ self.mode = key = mode.replace('b', '')[0]
+ self.pwd = None
+- self.comment = ''
++ self._comment = ''
+
+ # Check if we were passed a file-like object
+ if isinstance(file, basestring):
+@@ -765,7 +765,7 @@
+ print endrec
+ size_cd = endrec[_ECD_SIZE] # bytes in central directory
+ offset_cd = endrec[_ECD_OFFSET] # offset of central directory
+- self.comment = endrec[_ECD_COMMENT] # archive comment
++ self._comment = endrec[_ECD_COMMENT] # archive comment
+
+ # "concat" is zero, unless zip was concatenated to another file
+ concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
+@@ -864,6 +864,22 @@
+ """Set default password for encrypted files."""
+ self.pwd = pwd
+
++ @property
++ def comment(self):
++ """The comment text associated with the ZIP file."""
++ return self._comment
++
++ @comment.setter
++ def comment(self, comment):
++ # check for valid comment length
++ if len(comment) >= ZIP_MAX_COMMENT:
++ if self.debug:
++ print('Archive comment is too long; truncating to %d bytes'
++ % ZIP_MAX_COMMENT)
++ comment = comment[:ZIP_MAX_COMMENT]
++ self._comment = comment
++ self._didModify = True
++
+ def read(self, name, pwd=None):
+ """Return file bytes (as a string) for name."""
+ return self.open(name, "r", pwd).read()
+@@ -1243,18 +1259,11 @@
+ centDirSize = min(centDirSize, 0xFFFFFFFF)
+ centDirOffset = min(centDirOffset, 0xFFFFFFFF)
+
+- # check for valid comment length
+- if len(self.comment) >= ZIP_MAX_COMMENT:
+- if self.debug > 0:
+- msg = 'Archive comment is too long; truncating to %d bytes' \
+- % ZIP_MAX_COMMENT
+- self.comment = self.comment[:ZIP_MAX_COMMENT]
+-
+ endrec = struct.pack(structEndArchive, stringEndArchive,
+ 0, 0, centDirCount, centDirCount,
+- centDirSize, centDirOffset, len(self.comment))
++ centDirSize, centDirOffset, len(self._comment))
+ self.fp.write(endrec)
+- self.fp.write(self.comment)
++ self.fp.write(self._comment)
+ self.fp.flush()
+
+ if not self._filePassed:
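Editor's note: with comment now a property, assigning it marks the archive as modified so the new value is written out on close (issue #14399). A small sketch mirroring the added tests (the archive name is hypothetical):

    import zipfile

    with zipfile.ZipFile('archive.zip', 'a', zipfile.ZIP_STORED) as zf:
        zf.comment = 'this is a comment'   # setter truncates past ZIP_MAX_COMMENT
    with zipfile.ZipFile('archive.zip', 'r') as zf:
        assert zf.comment == 'this is a comment'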
+diff -r 70274d53c1dd Mac/README
+--- a/Mac/README
++++ b/Mac/README
+@@ -70,7 +70,7 @@
+ $ make
+ $ make install
+
+-This flag can be used a framework build of python, but also with a classic
++This flag can be used with a framework build of python, but also with a classic
+ unix build. Either way you will have to build python on Mac OS X 10.4 (or later)
+ with Xcode 2.1 (or later). You also have to install the 10.4u SDK when
+ installing Xcode.
+@@ -221,8 +221,8 @@
+
+ Go to the directory "Mac/OSX/BuildScript". There you'll find a script
+ "build-installer.py" that does all the work. This will download and build
+-a number of 3th-party libaries, configures and builds a framework Python,
+-installs it, creates the installer pacakge files and then packs this in a
++a number of 3rd-party libraries, configures and builds a framework Python,
++installs it, creates the installer package files and then packs this in a
+ DMG image.
+
+ The script will build a universal binary, you'll therefore have to run this
+@@ -258,8 +258,8 @@
+ Uninstalling a framework install, including the binary installer
+ ================================================================
+
+-Uninstalling a framework can be done by manually removing all bits that got installed,
+-that's true for both installations from source and installations using the binary installer.
++Uninstalling a framework can be done by manually removing all bits that got installed.
++That's true for both installations from source and installations using the binary installer.
+ Sadly enough OSX does not have a central uninstaller.
+
+ The main bit of a framework install is the framework itself, installed in
+diff -r 70274d53c1dd Makefile.pre.in
+--- a/Makefile.pre.in
++++ b/Makefile.pre.in
+@@ -152,7 +152,7 @@
+ SUBDIRSTOO= Include Lib Misc Demo
+
+ # Files and directories to be distributed
+-CONFIGFILES= configure configure.in acconfig.h pyconfig.h.in Makefile.pre.in
++CONFIGFILES= configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in
+ DISTFILES= README ChangeLog $(CONFIGFILES)
+ DISTDIRS= $(SUBDIRS) $(SUBDIRSTOO) Ext-dummy
+ DIST= $(DISTFILES) $(DISTDIRS)
+@@ -1165,7 +1165,7 @@
+ $(SHELL) config.status --recheck
+ $(SHELL) config.status
+
+-# Rebuild the configure script from configure.in; also rebuild pyconfig.h.in
++# Rebuild the configure script from configure.ac; also rebuild pyconfig.h.in
+ autoconf:
+ (cd $(srcdir); autoconf)
+ (cd $(srcdir); autoheader)
+diff -r 70274d53c1dd Misc/ACKS
+--- a/Misc/ACKS
++++ b/Misc/ACKS
+@@ -44,6 +44,7 @@
+ Matt Bandy
+ Michael J. Barber
+ Chris Barker
++Anton Barkovsky
+ Nick Barnes
+ Quentin Barnes
+ Richard Barran
+@@ -73,6 +74,7 @@
+ Steven Bethard
+ Stephen Bevan
+ Ron Bickers
++Natalia B. Bidart
+ David Binger
+ Dominic Binks
+ Philippe Biondi
+@@ -127,6 +129,7 @@
+ Brett Cannon
+ Mike Carlton
+ Terry Carroll
++Damien Cassou
+ Lorenzo M. Catucci
+ Donn Cave
+ Charles Cazabon
+@@ -273,6 +276,7 @@
+ John Fouhy
+ Martin Franklin
+ Robin Friedrich
++Bradley Froehle
+ Ivan Frohne
+ Jim Fulton
+ Tadayoshi Funaba
+@@ -295,6 +299,7 @@
+ Dan Gass
+ Andrew Gaul
+ Stephen M. Gava
++Xavier de Gaye
+ Harry Henry Gebel
+ Marius Gedminas
+ Thomas Gellekum
+@@ -320,6 +325,7 @@
+ Lars Gustäbel
+ Thomas Güttler
+ Barry Haddow
++Philipp Hagemeister
+ Paul ten Hagen
+ Rasmus Hahn
+ Peter Haight
+@@ -366,6 +372,7 @@
+ Albert Hofkamp
+ Tomas Hoger
+ Jonathan Hogg
++Akintayo Holder
+ Gerrit Holl
+ Shane Holloway
+ Rune Holm
+@@ -387,6 +394,7 @@
+ Greg Humphreys
+ Eric Huss
+ Jeremy Hylton
++Ludwig Hähne
+ Gerhard Häring
+ Fredrik Håård
+ Catalin Iacob
+@@ -409,6 +417,7 @@
+ Thomas Jarosch
+ Drew Jenkins
+ Flemming Kjær Jensen
++Philip Jenvey
+ Jiba
+ Orjan Johansen
+ Fredrik Johansson
+@@ -457,10 +466,12 @@
+ Kim Knapp
+ Lenny Kneler
+ Pat Knight
++Jeff Knupp
+ Greg Kochanski
+ Damon Kohler
+ Marko Kohtala
+ Joseph Koshy
++Jerzy Kozera
+ Maksim Kozyarchuk
+ Stefan Krah
+ Bob Kras
+@@ -499,6 +510,7 @@
+ Robert Lehmann
+ Petri Lehtinen
+ Luke Kenneth Casson Leighton
++Tshepang Lekhonkhobe
+ Marc-Andre Lemburg
+ John Lenton
+ Christopher Tur Lesniewski-Laas
+@@ -539,6 +551,7 @@
+ Vladimir Marangozov
+ David Marek
+ Doug Marien
++Sven Marnach
+ Alex Martelli
+ Anthony Martin
+ Sébastien Martini
+@@ -567,6 +580,7 @@
+ Mike Meyer
+ Steven Miale
+ Trent Mick
++Tom Middleton
+ Stan Mihai
+ Aristotelis Mikropoulos
+ Damien Miller
+@@ -577,6 +591,7 @@
+ Andrii V. Mishkovskyi
+ Dustin J. Mitchell
+ Dom Mitchell
++Florian Mladitsch
+ Doug Moen
+ The Dragon De Monsyne
+ Skip Montanaro
+@@ -681,6 +696,7 @@
+ Brian Quinlan
+ Anders Qvist
+ Burton Radons
++Jeff Ramnani
+ Brodie Rao
+ Antti Rasinen
+ Sridhar Ratnakumar
+@@ -709,11 +725,13 @@
+ Juan M. Bello Rivas
+ Davide Rizzo
+ Anthony Roach
++Carl Robben
+ Mark Roberts
+ Jim Robinson
+ Andy Robinson
+ Kevin Rodgers
+ Giampaolo Rodola
++Adi Roiban
+ Mike Romberg
+ Armin Ronacher
+ Case Roole
+@@ -749,6 +767,7 @@
+ Michael Scharf
+ Neil Schemenauer
+ David Scherer
++Hynek Schlawack
+ Gregor Schmid
+ Ralf Schmitt
+ Michael Schneider
+@@ -770,6 +789,7 @@
+ Denis Severson
+ Ian Seyer
+ Ha Shao
++Mark Shannon
+ Richard Shapiro
+ Bruce Sherwood
+ Alexander Shigin
+@@ -780,6 +800,7 @@
+ Itamar Shtull-Trauring
+ Eric Siegerman
+ Paul Sijben
++Tim Silk
+ Kirill Simonov
+ Nathan Paul Simons
+ Janne Sinkkonen
+@@ -810,6 +831,7 @@
+ Peter Stoehr
+ Casper Stoel
+ Michael Stone
++Serhiy Storchaka
+ Ken Stox
+ Patrick Strawderman
+ Dan Stromberg
+@@ -827,6 +849,7 @@
+ Geoff Talvola
+ William Tanksley
+ Christian Tanzer
++Stefano Taschini
+ Steven Taschuk
+ Monty Taylor
+ Amy Taylor
+@@ -878,6 +901,7 @@
+ Kurt Vile
+ Norman Vine
+ Frank Visser
++Johannes Vogel
+ Niki W. Waibel
+ Wojtek Walczak
+ Charles Waldman
+@@ -885,6 +909,7 @@
+ Larry Wall
+ Kevin Walzer
+ Greg Ward
++Zachary Ware
+ Barry Warsaw
+ Steve Waterbury
+ Bob Watson
+@@ -911,6 +936,7 @@
+ Gerald S. Williams
+ Frank Willison
+ Greg V. Wilson
++J Derek Wilson
+ Jody Winston
+ Collin Winter
+ Dik Winter
+diff -r 70274d53c1dd Misc/NEWS
+--- a/Misc/NEWS
++++ b/Misc/NEWS
+@@ -1,17 +1,375 @@
+ Python News
+ +++++++++++
+
+-What's New in Python 2.7.3 final?
+-=================================
+-
+-*Release date: 2012-04-09*
++What's New in Python 2.7.4
++==========================
++
++*Release date: XXXX-XX-XX*
++
++Core and Builtins
++-----------------
++
++- Issue #15041: Update "see also" list in tkinter documentation.
++
++- Issue #14579: Fix error handling bug in the utf-16 decoder. Patch by
++ Serhiy Storchaka.
++
++- Issue #15368: An issue that caused bytecode generation to be
++ non-deterministic when using randomized hashing (-R) has been fixed.
++
++- Issue #15033: Fix the exit status bug so that modules invoked using the -m
++  switch return the proper failure return value (1). Patch contributed by Jeff Knupp.
++
++- Issue #12268: File readline, readlines and read() methods no longer lose
++ data when an underlying read system call is interrupted. IOError is no
++ longer raised due to a read system call returning EINTR from within these
++ methods.
++
++- Issue #13512: Create ~/.pypirc securely (CVE-2011-4944). Initial patch by
++ Philip Jenvey, tested by Mageia and Debian.
++
++- Issue #7719: Make distutils ignore ``.nfs*`` files instead of choking later
++ on. Initial patch by SilentGhost and Jeff Ramnani.
++
++- Issue #10053: Don't close FDs when FileIO.__init__ fails. Loosely based on
++ the work by Hirokazu Yamamoto.
++
++- Issue #14775: Fix a potential quadratic dict build-up due to the garbage
++ collector repeatedly trying to untrack dicts.
++
++- Issue #14494: Fix __future__.py and its documentation to note that
++ absolute imports are the default behavior in 3.0 instead of 2.7.
++ Patch by Sven Marnach.
++
++- Issue #14761: Fix potential leak on an error case in the import machinery.
++
++- Issue #14699: Fix calling the classmethod descriptor directly.
++
++- Issue #11603 (again): Setting __repr__ to __str__ now raises a RuntimeError
++ when repr() or str() is called on such an object.
++
++- Issue #14658: Fix binding a special method to a builtin implementation of a
++ special method with a different name.
++
++- Issue #14612: Fix jumping around with blocks by setting f_lineno.
++
++- Issue #13889: Check and (if necessary) set FPU control word before calling
++ any of the dtoa.c string <-> float conversion functions, on MSVC builds of
++ Python. This fixes issues when embedding Python in a Delphi app.
++
++- Issue #14505: Fix file descriptor leak when deallocating file objects
++ created with PyFile_FromString().
++
++- Issue #14474: Save and restore exception state in thread.start_new_thread()
++  while writing the error message if the thread leaves an unhandled exception.
++
++- Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch
++ by Suman Saha.
++
++- Issue #14378: Fix compiling ast.ImportFrom nodes with a "__future__" string as
++ the module name that was not interned.
++
++- Issue #14331: Use significantly less stack space when importing modules by
++ allocating path buffers on the heap instead of the stack.
++
++- Issue #14334: Prevent a segfault in type.__getattribute__ when it was not
++ passed strings. Also fix segfaults in the __getattribute__ and __setattr__
++ methods of old-style classes.
++
++- Issue #14161: fix the __repr__ of file objects to escape the file name.
++
++- Issue #1469629: Allow cycles through an object's __dict__ slot to be
++ collected. (For example if ``x.__dict__ is x``).
++
++- Issue #13521: dict.setdefault() now does only one lookup for the given key,
++ making it "atomic" for many purposes. Patch by Filip Gruszczyński.
++
++- Issue #10538: When using the "s*" code with PyArg_ParseTuple() to fill a
++ Py_buffer structure with data from an object supporting only the old
++ PyBuffer interface, a reference to the source objects is now properly added
++ to the Py_buffer.obj member.
+
+ Library
+ -------
+
++- Issue #11062: Fix adding a message from file to Babyl mailbox.
++
++- Issue #15646: Prevent equivalent of a fork bomb when using
++ multiprocessing on Windows without the "if __name__ == '__main__'"
++ idiom.
++
++- Issue #15567: Fix NameError when running threading._test
++
++- Issue #15424: Add a __sizeof__ implementation for array objects.
++ Patch by Ludwig Hähne.
++
++- Issue #13052: Fix IDLE crashing when replace string in Search/Replace dialog
++ ended with '\'. Patch by Roger Serwy.
++
++- Issue #15538: Fix compilation of the getnameinfo() / getaddrinfo()
++ emulation code. Patch by Philipp Hagemeister.
++
++- Issue #9803: Don't close IDLE on saving if breakpoint is open.
++ Patch by Roger Serwy.
++
++- Issue #12288: Consider '0' and '0.0' as valid initialvalue
++ for tkinter SimpleDialog.
++
++- Issue #15489: Add a __sizeof__ implementation for BytesIO objects.
++ Patch by Serhiy Storchaka.
++
++- Issue #15469: Add a __sizeof__ implementation for deque objects.
++ Patch by Serhiy Storchaka.
++
++- Issue #15487: Add a __sizeof__ implementation for buffered I/O objects.
++ Patch by Serhiy Storchaka.
++
++- Issue #15512: Add a __sizeof__ implementation for parser.
++ Patch by Serhiy Storchaka.
++
++- Issue #15402: An issue in the struct module that caused sys.getsizeof to
++ return incorrect results for struct.Struct instances has been fixed.
++ Initial patch by Serhiy Storchaka.
++
++- Issue #15232: when mangle_from is True, email.Generator now correctly mangles
++ lines that start with 'From ' that occur in a MIME preamble or epilog.
++
++- Issue #13922: argparse no longer incorrectly strips '--'s that appear
++ after the first one.
++
++- Issue #12353: argparse now correctly handles null argument values.
++
++- Issue #6493: An issue in ctypes on Windows that caused structure bitfields
++ of type ctypes.c_uint32 and width 32 to incorrectly be set has been fixed.
++
++- Issue #14635: telnetlib will use poll() rather than select() when possible
++ to avoid failing due to the select() file descriptor limit.
++
++- Issue #15247: FileIO now raises an error when given a file descriptor
++ pointing to a directory.
++
++- Issue #14591: Fix bug in Random.jumpahead that could produce an invalid
++ Mersenne Twister state on 64-bit machines.
++
++- Issue #5346: Preserve permissions of mbox, MMDF and Babyl mailbox
++ files on flush().
++
++- Issue #15219: Fix a reference leak when hashlib.new() is called with
++ invalid parameters.
++
++- Issue #9559: If messages were only added, a new file is no longer
++ created and renamed over the old file when flush() is called on an
++ mbox, MMDF or Babyl mailbox.
++
++- Issue #14653: email.utils.mktime_tz() no longer relies on system
++  mktime() when timezone offset is supplied.
++
++- Issue #6056: Make multiprocessing use setblocking(True) on the
++ sockets it uses. Original patch by J Derek Wilson.
++
++- Issue #15101: Make pool finalizer avoid joining current thread.
++
++- Issue #15054: A bug in tokenize.tokenize that caused string literals
++ with 'b' and 'br' prefixes to be incorrectly tokenized has been fixed.
++ Patch by Serhiy Storchaka.
++
++- Issue #15036: Mailbox no longer throws an error if a flush is done
++ between operations when removing or changing multiple items in mbox,
++ MMDF, or Babyl mailboxes.
++
++- Issue #10133: Make multiprocessing deallocate buffer if socket read
++ fails. Patch by Hallvard B Furuseth.
++
++- Issue #13854: Make multiprocessing properly handle non-integer
++ non-string argument to SystemExit.
++
++- Issue #12157: Make pool.map() empty iterables correctly. Initial
++ patch by mouad.
++
++- Issue #14962: Update text coloring in IDLE shell window after changing
++ options. Patch by Roger Serwy.
++
++- Issue #10997: Prevent a duplicate entry in IDLE's "Recent Files" menu.
++
++- Issue #12510: Attempting to get invalid tooltip no longer closes Idle.
++ Original patch by Roger Serwy.
++
++- Issue #10365: File open dialog now works instead of crashing
++ even when parent window is closed. Patch by Roger Serwy.
++
++- Issue #14876: Use user-selected font for highlight configuration.
++ Patch by Roger Serwy.
++
++- Issue #14036: Add an additional check to validate that the port in urlparse does
++  not fall into an illegal range, returning None if it does.
++
++- Issue #14888: Fix misbehaviour of the _md5 module when called on data
++ larger than 2**32 bytes.
++
++- Issue #14875: Use float('inf') instead of float('1e66666') in the json module.
++
++- Issue #14572: Prevent build failures with pre-3.5.0 versions of
++ sqlite3, such as was shipped with Centos 5 and Mac OS X 10.4.
++
++- Issue #14426: Correct the Date format in Expires attribute of Set-Cookie
++ Header in Cookie.py.
++
++- Issue #14721: Send proper header, Content-length: 0 when the body is an empty
++ string ''. Initial Patch contributed by Arve Knudsen.
++
++- Issue #14072: Fix parsing of 'tel' URIs in urlparse by making the check for
++ ports stricter.
++
++- Issue #9374: Generic parsing of query and fragment portions of url for any
++ scheme. Supported both by RFC3986 and RFC2396.
++
++- Issue #14798: Fix the functions in pyclbr to raise an ImportError
++ when the first part of a dotted name is not a package. Patch by
++ Xavier de Gaye.
++
++- Issue #14832: fixed the order of the argument references in the error
++ message produced by unittest's assertItemsEqual.
++
++- Issue #14829: Fix bisect issues under 64-bit Windows.
++
++- Issue #14777: tkinter may return undecoded UTF-8 bytes as a string when
++  accessing the Tk clipboard. Modify clipboard_get() to first request type
++ UTF8_STRING when no specific type is requested in an X11 windowing
++ environment, falling back to the current default type STRING if that fails.
++ Original patch by Thomas Kluyver.
++
++- Issue #12541: Be lenient with quotes around Realm field with HTTP Basic
++  Authentication in urllib2.
++
++- Issue #14662: Prevent shutil failures on OS X when destination does not
++ support chflag operations. Patch by Hynek Schlawack.
++
++- Issue #14157: Fix time.strptime failing without a year on February 29th.
++ Patch by Hynek Schlawack.
++
++- Issue #14768: Fix os.path.expanduser('~/a') not working correctly when HOME is '/'.
++
++- Issue #13183: Fix pdb skipping frames after hitting a breakpoint and running
++ step. Patch by Xavier de Gaye.
++
++- Issue #14664: It is now possible to use @unittest.skip{If,Unless} on a
++ test class that doesn't inherit from TestCase (i.e. a mixin).
++
++- Issue #14160: TarFile.extractfile() failed to resolve symbolic links when
++ the links were not located in an archive subdirectory.
++
++- Issue #14638: pydoc now treats non-string __name__ values as if they
++ were missing, instead of raising an error.
++
++- Issue #13684: Fix httplib tunnel issue of infinite loops for certain sites
++ which send EOF without trailing \r\n.
++
++- Issue #14308: Fix an exception when a "dummy" thread is in the threading
++ module's active list after a fork().
++
++- Issue #14538: HTMLParser can now parse correctly start tags that contain
++ a bare '/'.
++
++- Issue #14452: SysLogHandler no longer inserts a UTF-8 BOM into the message.
++
++- Issue #13496: Fix potential overflow in bisect.bisect algorithm when applied
++ to a collection of size > sys.maxsize / 2.
++
++- Issue #14399: zipfile now recognizes that the archive has been modified even
++ if only the comment is changed. As a consequence of this fix, ZipFile is now
++ a new style class.
++
++- Issue #7978: SocketServer now restarts the select() call when EINTR is
++ returned. This avoids crashing the server loop when a signal is received.
++ Patch by Jerzy Kozera.
++
++- Issue #14409: IDLE now properly executes commands in the Shell window
++ when it cannot read the normal config files on startup and
++ has to use the built-in default key bindings.
++ There was previously a bug in one of the defaults.
++
++- Issue #10340: asyncore - properly handle EINVAL in dispatcher constructor on
++ OSX; avoid to call handle_connect in case of a disconnected socket which
++ was not meant to connect.
++
++- Issue #12757: Fix the skipping of doctests when python is run with -OO so
++ that it works in unittest's verbose mode as well as non-verbose mode.
++
++- Issue #3573: Fix IDLE hanging when passed invalid command line args
++  (directory(ies) instead of file(s)). Patch by Guilherme Polo.
++
++- Issue #13694: Fix asynchronous connect in asyncore.dispatcher not setting the
++  addr attribute.
++
++- Issue #10484: Fix the CGIHTTPServer's PATH_INFO handling problem.
++
++- Issue #11199: Fix urllib hanging on particular ftp URLs.
++
++- Issue #5219: Prevent event handler cascade in IDLE.
++
++- Issue #14252: Fix subprocess.Popen.terminate() to not raise an error under
++ Windows when the child process has already exited.
++
++- Issue #14195: An issue that caused weakref.WeakSet instances to incorrectly
++ return True for a WeakSet instance 'a' in both 'a < a' and 'a > a' has been
++ fixed.
++
++- Issue #14159: Fix the len() of weak sets to return a better approximation
++ when some objects are dead or dying. Moreover, the implementation is now
++ O(1) rather than O(n).
++
++- Issue #2945: Make the distutils upload command aware of bdist_rpm products.
++
++- Issue #13447: Add a test file to host regression tests for bugs in the
++ scripts found in the Tools directory.
++
+ - Issue #6884: Fix long-standing bugs with MANIFEST.in parsing in distutils
+ on Windows.
+
++Tests
++-----
++
++- Issue #15496: Add directory removal helpers for tests on Windows.
++ Patch by Jeremy Kloth.
++
++- Issue #15043: test_gdb is now skipped entirely if gdb security settings
++ block loading of the gdb hooks
++
++- Issue #14589: Update certificate chain for sha256.tbs-internet.com, fixing
++ a test failure in test_ssl.
++
++Build
++-----
++
++- Issue #15560: Fix building _sqlite3 extension on OS X with an SDK.
++
++- Issue #8847: Disable COMDAT folding in Windows PGO builds.
++
++- Issue #14018: Fix OS X Tcl/Tk framework checking when using OS X SDKs.
++
++- Issue #8767: Restore building with --disable-unicode.
++ Patch by Stefano Taschini.
++
++- Build against bzip2 1.0.6 and openssl 0.9.8x on Windows.
++
++- Issue #14557: Fix extensions build on HP-UX. Patch by Adi Roiban.
++
++- Issue #14437: Fix building the _io module under Cygwin.
++
++Documentation
++-------------
++
++- Issue #15630: Add an example for "continue" stmt in the tutorial. Patch by
++ Daniel Ellis.
++
++- Issue #13557: Clarify effect of giving two different namespaces to exec or
++ execfile().
++
++- Issue #14034: added the argparse tutorial.
++
++- Issue #15250: Document that filecmp.dircmp compares files shallowly. Patch
++ contributed by Chris Jerdonek.
++
+
+ What's New in Python 2.7.3 release candidate 2?
+ ===============================================
+@@ -498,6 +856,10 @@
+ Extension Modules
+ -----------------
+
++- Issue #9041: An issue in ctypes.c_longdouble, ctypes.c_double, and
++ ctypes.c_float that caused an incorrect exception to be returned in the
++ case of overflow has been fixed.
++
+ - bsddb module: Erratic behaviour of "DBEnv->rep_elect()" because a typo.
+ Possible crash.
+
+@@ -572,6 +934,9 @@
+ Tests
+ -----
+
++- Issue #15467: Move helpers for __sizeof__ tests into test_support.
++ Patch by Serhiy Storchaka.
++
+ - Issue #11689: Fix a variable scoping error in an sqlite3 test.
+ Initial patch by Torsten Landschoff.
+
+@@ -624,7 +989,8 @@
+ Documentation
+ -------------
+
+-- Issue #13995: Fix errors in sqlite3's Cursor.rowcount documentation
++- Issues #13491 and #13995: Fix many errors in sqlite3 documentation.
++ Initial patch for #13491 by Johannes Vogel.
+
+ - Issue #13402: Document absoluteness of sys.executable.
+
+diff -r 70274d53c1dd Misc/README.OpenBSD
+--- a/Misc/README.OpenBSD
++++ b/Misc/README.OpenBSD
+@@ -29,7 +29,7 @@
+
+ If your version is not in that list, e.g., 3.9, add the version
+ number. In this case, you would just need to add a 9 after the 8.
+-If you modify configure.in, you will need to regenerate configure
++If you modify configure.ac, you will need to regenerate configure
+ with autoconf.
+
+ If your version is already in the list, this is not a known problem.
+diff -r 70274d53c1dd Misc/python.man
+--- a/Misc/python.man
++++ b/Misc/python.man
+@@ -459,7 +459,7 @@
+ .br
+ Documentation: http://docs.python.org/
+ .br
+-Developer resources: http://www.python.org/dev/
++Developer resources: http://docs.python.org/devguide/
+ .br
+ Downloads: http://python.org/download/
+ .br
+diff -r 70274d53c1dd Modules/_bisectmodule.c
+--- a/Modules/_bisectmodule.c
++++ b/Modules/_bisectmodule.c
+@@ -21,7 +21,10 @@
+ return -1;
+ }
+ while (lo < hi) {
+- mid = (lo + hi) / 2;
++ /* The (size_t)cast ensures that the addition and subsequent division
++ are performed as unsigned operations, avoiding difficulties from
++ signed overflow. (See issue 13496.) */
++ mid = ((size_t)lo + hi) / 2;
+ litem = PySequence_GetItem(list, mid);
+ if (litem == NULL)
+ return -1;
+@@ -56,7 +59,8 @@
+ }
+
+ PyDoc_STRVAR(bisect_right_doc,
+-"bisect_right(a, x[, lo[, hi]]) -> index\n\
++"bisect(a, x[, lo[, hi]]) -> index\n\
++bisect_right(a, x[, lo[, hi]]) -> index\n\
+ \n\
+ Return the index where to insert item x in list a, assuming a is sorted.\n\
+ \n\
+@@ -97,7 +101,8 @@
+ }
+
+ PyDoc_STRVAR(insort_right_doc,
+-"insort_right(a, x[, lo[, hi]])\n\
++"insort(a, x[, lo[, hi]])\n\
++insort_right(a, x[, lo[, hi]])\n\
+ \n\
+ Insert item x in list a, and keep it sorted assuming a is sorted.\n\
+ \n\
+@@ -122,7 +127,10 @@
+ return -1;
+ }
+ while (lo < hi) {
+- mid = (lo + hi) / 2;
++ /* The (size_t)cast ensures that the addition and subsequent division
++ are performed as unsigned operations, avoiding difficulties from
++ signed overflow. (See issue 13496.) */
++ mid = ((size_t)lo + hi) / 2;
+ litem = PySequence_GetItem(list, mid);
+ if (litem == NULL)
+ return -1;
+@@ -187,7 +195,7 @@
+ if (PyList_Insert(list, index, item) < 0)
+ return NULL;
+ } else {
+- result = PyObject_CallMethod(list, "insert", "iO",
++ result = PyObject_CallMethod(list, "insert", "nO",
+ index, item);
+ if (result == NULL)
+ return NULL;
+@@ -207,18 +215,15 @@
+ Optional args lo (default 0) and hi (default len(a)) bound the\n\
+ slice of a to be searched.\n");
+
+-PyDoc_STRVAR(bisect_doc, "Alias for bisect_right().\n");
+-PyDoc_STRVAR(insort_doc, "Alias for insort_right().\n");
+-
+ static PyMethodDef bisect_methods[] = {
+ {"bisect_right", (PyCFunction)bisect_right,
+ METH_VARARGS|METH_KEYWORDS, bisect_right_doc},
+ {"bisect", (PyCFunction)bisect_right,
+- METH_VARARGS|METH_KEYWORDS, bisect_doc},
++ METH_VARARGS|METH_KEYWORDS, bisect_right_doc},
+ {"insort_right", (PyCFunction)insort_right,
+ METH_VARARGS|METH_KEYWORDS, insort_right_doc},
+ {"insort", (PyCFunction)insort_right,
+- METH_VARARGS|METH_KEYWORDS, insort_doc},
++ METH_VARARGS|METH_KEYWORDS, insort_right_doc},
+ {"bisect_left", (PyCFunction)bisect_left,
+ METH_VARARGS|METH_KEYWORDS, bisect_left_doc},
+ {"insort_left", (PyCFunction)insort_left,
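
The (size_t) casts in the two hunks above work because the sum of two valid Py_ssize_t indices can exceed PY_SSIZE_T_MAX; computing the sum in unsigned arithmetic avoids signed overflow, which is undefined behaviour in C, and the halved result always fits back in the signed type since lo <= hi. A minimal stand-alone sketch of the same idea (not part of the patch; the helper name is made up):

    #include <stddef.h>

    /* Midpoint of two non-negative signed indices without risking signed
       overflow: perform the addition as size_t, or avoid the sum entirely. */
    static ptrdiff_t
    midpoint(ptrdiff_t lo, ptrdiff_t hi)          /* requires 0 <= lo <= hi */
    {
        return (ptrdiff_t)(((size_t)lo + (size_t)hi) / 2);
        /* equivalent overflow-free alternative: lo + (hi - lo) / 2 */
    }
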
+diff -r 70274d53c1dd Modules/_collectionsmodule.c
+--- a/Modules/_collectionsmodule.c
++++ b/Modules/_collectionsmodule.c
+@@ -991,6 +991,23 @@
+ }
+
+ static PyObject *
++deque_sizeof(dequeobject *deque, void *unused)
++{
++ Py_ssize_t res;
++ Py_ssize_t blocks;
++
++ res = sizeof(dequeobject);
++ blocks = (deque->leftindex + deque->len + BLOCKLEN - 1) / BLOCKLEN;
++ assert(deque->leftindex + deque->len - 1 ==
++ (blocks - 1) * BLOCKLEN + deque->rightindex);
++ res += blocks * sizeof(block);
++ return PyLong_FromSsize_t(res);
++}
++
++PyDoc_STRVAR(sizeof_doc,
++"D.__sizeof__() -- size of D in memory, in bytes");
++
++static PyObject *
+ deque_get_maxlen(dequeobject *deque)
+ {
+ if (deque->maxlen == -1)
+@@ -1053,7 +1070,9 @@
+ {"reverse", (PyCFunction)deque_reverse,
+ METH_NOARGS, reverse_doc},
+ {"rotate", (PyCFunction)deque_rotate,
+- METH_VARARGS, rotate_doc},
++ METH_VARARGS, rotate_doc},
++ {"__sizeof__", (PyCFunction)deque_sizeof,
++ METH_NOARGS, sizeof_doc},
+ {NULL, NULL} /* sentinel */
+ };
+
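
The block count computed by deque_sizeof() above is a ceiling division: the items occupy positions leftindex .. leftindex+len-1 of a chain of fixed-size blocks, so blocks = ceil((leftindex + len) / BLOCKLEN) = (leftindex + len + BLOCKLEN - 1) / BLOCKLEN. As a worked example with made-up numbers (the BLOCKLEN value here is only illustrative): with BLOCKLEN = 64, leftindex = 10 and len = 100, the items span indices 10..109, which lie in blocks 0 and 1, and the formula gives (10 + 100 + 63) / 64 = 2.
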
+diff -r 70274d53c1dd Modules/_ctypes/cfield.c
+--- a/Modules/_ctypes/cfield.c
++++ b/Modules/_ctypes/cfield.c
+@@ -431,12 +431,8 @@
+ #define LOW_BIT(x) ((x) & 0xFFFF)
+ #define NUM_BITS(x) ((x) >> 16)
+
+-/* This seems nore a compiler issue than a Windows/non-Windows one */
+-#ifdef MS_WIN32
+-# define BIT_MASK(size) ((1 << NUM_BITS(size))-1)
+-#else
+-# define BIT_MASK(size) ((1LL << NUM_BITS(size))-1)
+-#endif
++/* Doesn't work if NUM_BITS(size) == 0, but it never happens in SET() call. */
++#define BIT_MASK(type, size) (((((type)1 << (NUM_BITS(size) - 1)) - 1) << 1) + 1)
+
+ /* This macro CHANGES the first parameter IN PLACE. For proper sign handling,
+ we must first shift left, then right.
+@@ -448,10 +444,10 @@
+ }
+
+ /* This macro RETURNS the first parameter with the bit field CHANGED. */
+-#define SET(x, v, size) \
++#define SET(type, x, v, size) \
+ (NUM_BITS(size) ? \
+- ( ( x & ~(BIT_MASK(size) << LOW_BIT(size)) ) | ( (v & BIT_MASK(size)) << LOW_BIT(size) ) ) \
+- : v)
++ ( ( (type)x & ~(BIT_MASK(type, size) << LOW_BIT(size)) ) | ( ((type)v & BIT_MASK(type, size)) << LOW_BIT(size) ) ) \
++ : (type)v)
+
+ /* byte swapping macros */
+ #define SWAP_2(v) \
+@@ -523,7 +519,7 @@
+ long val;
+ if (get_long(value, &val) < 0)
+ return NULL;
+- *(signed char *)ptr = (signed char)SET(*(signed char *)ptr, (signed char)val, size);
++ *(signed char *)ptr = SET(signed char, *(signed char *)ptr, val, size);
+ _RET(value);
+ }
+
+@@ -542,8 +538,7 @@
+ unsigned long val;
+ if (get_ulong(value, &val) < 0)
+ return NULL;
+- *(unsigned char *)ptr = (unsigned char)SET(*(unsigned char*)ptr,
+- (unsigned short)val, size);
++ *(unsigned char *)ptr = SET(unsigned char, *(unsigned char*)ptr, val, size);
+ _RET(value);
+ }
+
+@@ -564,7 +559,7 @@
+ if (get_long(value, &val) < 0)
+ return NULL;
+ memcpy(&x, ptr, sizeof(x));
+- x = SET(x, (short)val, size);
++ x = SET(short, x, val, size);
+ memcpy(ptr, &x, sizeof(x));
+ _RET(value);
+ }
+@@ -579,7 +574,7 @@
+ return NULL;
+ memcpy(&field, ptr, sizeof(field));
+ field = SWAP_2(field);
+- field = SET(field, (short)val, size);
++ field = SET(short, field, val, size);
+ field = SWAP_2(field);
+ memcpy(ptr, &field, sizeof(field));
+ _RET(value);
+@@ -612,7 +607,7 @@
+ if (get_ulong(value, &val) < 0)
+ return NULL;
+ memcpy(&x, ptr, sizeof(x));
+- x = SET(x, (unsigned short)val, size);
++ x = SET(unsigned short, x, val, size);
+ memcpy(ptr, &x, sizeof(x));
+ _RET(value);
+ }
+@@ -626,7 +621,7 @@
+ return NULL;
+ memcpy(&field, ptr, sizeof(field));
+ field = SWAP_2(field);
+- field = SET(field, (unsigned short)val, size);
++ field = SET(unsigned short, field, val, size);
+ field = SWAP_2(field);
+ memcpy(ptr, &field, sizeof(field));
+ _RET(value);
+@@ -660,7 +655,7 @@
+ if (get_long(value, &val) < 0)
+ return NULL;
+ memcpy(&x, ptr, sizeof(x));
+- x = SET(x, (int)val, size);
++ x = SET(int, x, val, size);
+ memcpy(ptr, &x, sizeof(x));
+ _RET(value);
+ }
+@@ -674,7 +669,7 @@
+ return NULL;
+ memcpy(&field, ptr, sizeof(field));
+ field = SWAP_INT(field);
+- field = SET(field, (int)val, size);
++ field = SET(int, field, val, size);
+ field = SWAP_INT(field);
+ memcpy(ptr, &field, sizeof(field));
+ _RET(value);
+@@ -761,7 +756,7 @@
+ if (get_ulong(value, &val) < 0)
+ return NULL;
+ memcpy(&x, ptr, sizeof(x));
+- x = SET(x, (unsigned int)val, size);
++ x = SET(unsigned int, x, val, size);
+ memcpy(ptr, &x, sizeof(x));
+ _RET(value);
+ }
+@@ -774,7 +769,7 @@
+ if (get_ulong(value, &val) < 0)
+ return NULL;
+ memcpy(&field, ptr, sizeof(field));
+- field = (unsigned int)SET(field, (unsigned int)val, size);
++ field = SET(unsigned int, field, (unsigned int)val, size);
+ field = SWAP_INT(field);
+ memcpy(ptr, &field, sizeof(field));
+ _RET(value);
+@@ -808,7 +803,7 @@
+ if (get_long(value, &val) < 0)
+ return NULL;
+ memcpy(&x, ptr, sizeof(x));
+- x = SET(x, val, size);
++ x = SET(long, x, val, size);
+ memcpy(ptr, &x, sizeof(x));
+ _RET(value);
+ }
+@@ -822,7 +817,7 @@
+ return NULL;
+ memcpy(&field, ptr, sizeof(field));
+ field = SWAP_LONG(field);
+- field = (long)SET(field, val, size);
++ field = SET(long, field, val, size);
+ field = SWAP_LONG(field);
+ memcpy(ptr, &field, sizeof(field));
+ _RET(value);
+@@ -856,7 +851,7 @@
+ if (get_ulong(value, &val) < 0)
+ return NULL;
+ memcpy(&x, ptr, sizeof(x));
+- x = SET(x, val, size);
++ x = SET(unsigned long, x, val, size);
+ memcpy(ptr, &x, sizeof(x));
+ _RET(value);
+ }
+@@ -870,7 +865,7 @@
+ return NULL;
+ memcpy(&field, ptr, sizeof(field));
+ field = SWAP_LONG(field);
+- field = (unsigned long)SET(field, val, size);
++ field = SET(unsigned long, field, val, size);
+ field = SWAP_LONG(field);
+ memcpy(ptr, &field, sizeof(field));
+ _RET(value);
+@@ -905,7 +900,7 @@
+ if (get_longlong(value, &val) < 0)
+ return NULL;
+ memcpy(&x, ptr, sizeof(x));
+- x = SET(x, val, size);
++ x = SET(PY_LONG_LONG, x, val, size);
+ memcpy(ptr, &x, sizeof(x));
+ _RET(value);
+ }
+@@ -919,7 +914,7 @@
+ return NULL;
+ memcpy(&field, ptr, sizeof(field));
+ field = SWAP_8(field);
+- field = (PY_LONG_LONG)SET(field, val, size);
++ field = SET(PY_LONG_LONG, field, val, size);
+ field = SWAP_8(field);
+ memcpy(ptr, &field, sizeof(field));
+ _RET(value);
+@@ -952,7 +947,7 @@
+ if (get_ulonglong(value, &val) < 0)
+ return NULL;
+ memcpy(&x, ptr, sizeof(x));
+- x = SET(x, val, size);
++ x = SET(PY_LONG_LONG, x, val, size);
+ memcpy(ptr, &x, sizeof(x));
+ _RET(value);
+ }
+@@ -966,7 +961,7 @@
+ return NULL;
+ memcpy(&field, ptr, sizeof(field));
+ field = SWAP_8(field);
+- field = (unsigned PY_LONG_LONG)SET(field, val, size);
++ field = SET(unsigned PY_LONG_LONG, field, val, size);
+ field = SWAP_8(field);
+ memcpy(ptr, &field, sizeof(field));
+ _RET(value);
+@@ -1003,12 +998,8 @@
+ long double x;
+
+ x = PyFloat_AsDouble(value);
+- if (x == -1 && PyErr_Occurred()) {
+- PyErr_Format(PyExc_TypeError,
+- " float expected instead of %s instance",
+- value->ob_type->tp_name);
++ if (x == -1 && PyErr_Occurred())
+ return NULL;
+- }
+ memcpy(ptr, &x, sizeof(long double));
+ _RET(value);
+ }
+@@ -1027,12 +1018,8 @@
+ double x;
+
+ x = PyFloat_AsDouble(value);
+- if (x == -1 && PyErr_Occurred()) {
+- PyErr_Format(PyExc_TypeError,
+- " float expected instead of %s instance",
+- value->ob_type->tp_name);
++ if (x == -1 && PyErr_Occurred())
+ return NULL;
+- }
+ memcpy(ptr, &x, sizeof(double));
+ _RET(value);
+ }
+@@ -1051,12 +1038,8 @@
+ double x;
+
+ x = PyFloat_AsDouble(value);
+- if (x == -1 && PyErr_Occurred()) {
+- PyErr_Format(PyExc_TypeError,
+- " float expected instead of %s instance",
+- value->ob_type->tp_name);
++ if (x == -1 && PyErr_Occurred())
+ return NULL;
+- }
+ #ifdef WORDS_BIGENDIAN
+ if (_PyFloat_Pack8(x, (unsigned char *)ptr, 1))
+ return NULL;
+@@ -1083,12 +1066,8 @@
+ float x;
+
+ x = (float)PyFloat_AsDouble(value);
+- if (x == -1 && PyErr_Occurred()) {
+- PyErr_Format(PyExc_TypeError,
+- " float expected instead of %s instance",
+- value->ob_type->tp_name);
++ if (x == -1 && PyErr_Occurred())
+ return NULL;
+- }
+ memcpy(ptr, &x, sizeof(x));
+ _RET(value);
+ }
+@@ -1107,12 +1086,8 @@
+ float x;
+
+ x = (float)PyFloat_AsDouble(value);
+- if (x == -1 && PyErr_Occurred()) {
+- PyErr_Format(PyExc_TypeError,
+- " float expected instead of %s instance",
+- value->ob_type->tp_name);
++ if (x == -1 && PyErr_Occurred())
+ return NULL;
+- }
+ #ifdef WORDS_BIGENDIAN
+ if (_PyFloat_Pack4(x, (unsigned char *)ptr, 1))
+ return NULL;
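
The reworked BIT_MASK macro above builds an all-ones mask of NUM_BITS(size) bits without ever shifting a value by the full width of the field's type; a shift count equal to or larger than the type width is undefined behaviour in C, which is what the old 1 << / 1LL << forms risked for full-width bit fields. A stand-alone sketch of the same construction for an unsigned type (not part of the patch):

    /* n-bit all-ones mask of the given unsigned type, valid even when n is
       the full width of the type: shift by n-1, subtract 1, double, add 1. */
    #define ONES(type, n)  (((((type)1 << ((n) - 1)) - 1) << 1) + 1)

    /* e.g. on a platform with 32-bit unsigned int:
       ONES(unsigned int, 32) == 0xFFFFFFFFu, computed without ever
       evaluating the undefined expression (unsigned int)1 << 32. */
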
+diff -r 70274d53c1dd Modules/_hashopenssl.c
+--- a/Modules/_hashopenssl.c
++++ b/Modules/_hashopenssl.c
+@@ -477,6 +477,7 @@
+ }
+
+ if (!PyArg_Parse(name_obj, "s", &name)) {
++ PyBuffer_Release(&view);
+ PyErr_SetString(PyExc_TypeError, "name must be a string");
+ return NULL;
+ }
+diff -r 70274d53c1dd Modules/_io/_iomodule.c
+--- a/Modules/_io/_iomodule.c
++++ b/Modules/_io/_iomodule.c
+@@ -58,7 +58,7 @@
+ "\n"
+ "At the top of the I/O hierarchy is the abstract base class IOBase. It\n"
+ "defines the basic interface to a stream. Note, however, that there is no\n"
+-"seperation between reading and writing to streams; implementations are\n"
++"separation between reading and writing to streams; implementations are\n"
+ "allowed to throw an IOError if they do not support a given operation.\n"
+ "\n"
+ "Extending IOBase is RawIOBase which deals simply with the reading and\n"
+diff -r 70274d53c1dd Modules/_io/_iomodule.h
+--- a/Modules/_io/_iomodule.h
++++ b/Modules/_io/_iomodule.h
+@@ -72,7 +72,7 @@
+ PyObject *filename; /* Not used, but part of the IOError object */
+ Py_ssize_t written;
+ } PyBlockingIOErrorObject;
+-PyAPI_DATA(PyObject *) PyExc_BlockingIOError;
++extern PyObject *PyExc_BlockingIOError;
+
+ /*
+ * Offset type for positioning.
+diff -r 70274d53c1dd Modules/_io/bufferedio.c
+--- a/Modules/_io/bufferedio.c
++++ b/Modules/_io/bufferedio.c
+@@ -386,6 +386,17 @@
+ Py_TYPE(self)->tp_free((PyObject *)self);
+ }
+
++static PyObject *
++buffered_sizeof(buffered *self, void *unused)
++{
++ Py_ssize_t res;
++
++ res = sizeof(buffered);
++ if (self->buffer)
++ res += self->buffer_size;
++ return PyLong_FromSsize_t(res);
++}
++
+ static int
+ buffered_traverse(buffered *self, visitproc visit, void *arg)
+ {
+@@ -1560,6 +1571,7 @@
+ {"seek", (PyCFunction)buffered_seek, METH_VARARGS},
+ {"tell", (PyCFunction)buffered_tell, METH_NOARGS},
+ {"truncate", (PyCFunction)buffered_truncate, METH_VARARGS},
++ {"__sizeof__", (PyCFunction)buffered_sizeof, METH_NOARGS},
+ {NULL, NULL}
+ };
+
+@@ -1952,6 +1964,7 @@
+ {"flush", (PyCFunction)buffered_flush, METH_NOARGS},
+ {"seek", (PyCFunction)buffered_seek, METH_VARARGS},
+ {"tell", (PyCFunction)buffered_tell, METH_NOARGS},
++ {"__sizeof__", (PyCFunction)buffered_sizeof, METH_NOARGS},
+ {NULL, NULL}
+ };
+
+@@ -2347,6 +2360,7 @@
+ {"readline", (PyCFunction)buffered_readline, METH_VARARGS},
+ {"peek", (PyCFunction)buffered_peek, METH_VARARGS},
+ {"write", (PyCFunction)bufferedwriter_write, METH_VARARGS},
++ {"__sizeof__", (PyCFunction)buffered_sizeof, METH_NOARGS},
+ {NULL, NULL}
+ };
+
+diff -r 70274d53c1dd Modules/_io/bytesio.c
+--- a/Modules/_io/bytesio.c
++++ b/Modules/_io/bytesio.c
+@@ -794,6 +794,17 @@
+ return 0;
+ }
+
++static PyObject *
++bytesio_sizeof(bytesio *self, void *unused)
++{
++ Py_ssize_t res;
++
++ res = sizeof(bytesio);
++ if (self->buf)
++ res += self->buf_size;
++ return PyLong_FromSsize_t(res);
++}
++
+ static int
+ bytesio_traverse(bytesio *self, visitproc visit, void *arg)
+ {
+@@ -835,6 +846,7 @@
+ {"truncate", (PyCFunction)bytesio_truncate, METH_VARARGS, truncate_doc},
+ {"__getstate__", (PyCFunction)bytesio_getstate, METH_NOARGS, NULL},
+ {"__setstate__", (PyCFunction)bytesio_setstate, METH_O, NULL},
++ {"__sizeof__", (PyCFunction)bytesio_sizeof, METH_NOARGS, NULL},
+ {NULL, NULL} /* sentinel */
+ };
+
+diff -r 70274d53c1dd Modules/_io/fileio.c
+--- a/Modules/_io/fileio.c
++++ b/Modules/_io/fileio.c
+@@ -137,22 +137,15 @@
+ directories, so we need a check. */
+
+ static int
+-dircheck(fileio* self, const char *name)
++dircheck(fileio* self, PyObject *nameobj)
+ {
+ #if defined(HAVE_FSTAT) && defined(S_IFDIR) && defined(EISDIR)
+ struct stat buf;
+ if (self->fd < 0)
+ return 0;
+ if (fstat(self->fd, &buf) == 0 && S_ISDIR(buf.st_mode)) {
+- char *msg = strerror(EISDIR);
+- PyObject *exc;
+- if (internal_close(self))
+- return -1;
+-
+- exc = PyObject_CallFunction(PyExc_IOError, "(iss)",
+- EISDIR, msg, name);
+- PyErr_SetObject(PyExc_IOError, exc);
+- Py_XDECREF(exc);
++ errno = EISDIR;
++ PyErr_SetFromErrnoWithFilenameObject(PyExc_IOError, nameobj);
+ return -1;
+ }
+ #endif
+@@ -195,12 +188,17 @@
+ int flags = 0;
+ int fd = -1;
+ int closefd = 1;
++ int fd_is_own = 0;
+
+ assert(PyFileIO_Check(oself));
+ if (self->fd >= 0) {
+- /* Have to close the existing file first. */
+- if (internal_close(self) < 0)
+- return -1;
++ if (self->closefd) {
++ /* Have to close the existing file first. */
++ if (internal_close(self) < 0)
++ return -1;
++ }
++ else
++ self->fd = -1;
+ }
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|si:fileio",
+@@ -341,6 +339,7 @@
+ #endif
+ self->fd = open(name, flags, 0666);
+ Py_END_ALLOW_THREADS
++ fd_is_own = 1;
+ if (self->fd < 0) {
+ #ifdef MS_WINDOWS
+ if (widename != NULL)
+@@ -350,9 +349,9 @@
+ PyErr_SetFromErrnoWithFilename(PyExc_IOError, name);
+ goto error;
+ }
+- if(dircheck(self, name) < 0)
+- goto error;
+ }
++ if (dircheck(self, nameobj) < 0)
++ goto error;
+
+ if (PyObject_SetAttrString((PyObject *)self, "name", nameobj) < 0)
+ goto error;
+@@ -362,19 +361,17 @@
+ end of file (otherwise, it might be done only on the
+ first write()). */
+ PyObject *pos = portable_lseek(self->fd, NULL, 2);
+- if (pos == NULL) {
+- if (closefd) {
+- close(self->fd);
+- self->fd = -1;
+- }
++ if (pos == NULL)
+ goto error;
+- }
+ Py_DECREF(pos);
+ }
+
+ goto done;
+
+ error:
++ if (!fd_is_own)
++ self->fd = -1;
++
+ ret = -1;
+
+ done:
+diff -r 70274d53c1dd Modules/_io/textio.c
+--- a/Modules/_io/textio.c
++++ b/Modules/_io/textio.c
+@@ -622,15 +622,22 @@
+ "errors determines the strictness of encoding and decoding (see the\n"
+ "codecs.register) and defaults to \"strict\".\n"
+ "\n"
+- "newline can be None, '', '\\n', '\\r', or '\\r\\n'. It controls the\n"
+- "handling of line endings. If it is None, universal newlines is\n"
+- "enabled. With this enabled, on input, the lines endings '\\n', '\\r',\n"
+- "or '\\r\\n' are translated to '\\n' before being returned to the\n"
+- "caller. Conversely, on output, '\\n' is translated to the system\n"
+- "default line seperator, os.linesep. If newline is any other of its\n"
+- "legal values, that newline becomes the newline when the file is read\n"
+- "and it is returned untranslated. On output, '\\n' is converted to the\n"
+- "newline.\n"
++ "newline controls how line endings are handled. It can be None, '',\n"
++ "'\\n', '\\r', and '\\r\\n'. It works as follows:\n"
++ "\n"
++ "* On input, if newline is None, universal newlines mode is\n"
++ " enabled. Lines in the input can end in '\\n', '\\r', or '\\r\\n', and\n"
++ " these are translated into '\\n' before being returned to the\n"
++ " caller. If it is '', universal newline mode is enabled, but line\n"
++ " endings are returned to the caller untranslated. If it has any of\n"
++ " the other legal values, input lines are only terminated by the given\n"
++ " string, and the line ending is returned to the caller untranslated.\n"
++ "\n"
++ "* On output, if newline is None, any '\\n' characters written are\n"
++ " translated to the system default line separator, os.linesep. If\n"
++ " newline is '', no translation takes place. If newline is any of the\n"
++ " other legal values, any '\\n' characters written are translated to\n"
++ " the given string.\n"
+ "\n"
+ "If line_buffering is True, a call to flush is implied when a call to\n"
+ "write contains a newline character."
+diff -r 70274d53c1dd Modules/_json.c
+--- a/Modules/_json.c
++++ b/Modules/_json.c
+@@ -1032,7 +1032,7 @@
+ while (idx <= end_idx) {
+ /* read key */
+ if (str[idx] != '"') {
+- raise_errmsg("Expecting property name", pystr, idx);
++ raise_errmsg("Expecting property name enclosed in double quotes", pystr, idx);
+ goto bail;
+ }
+ key = scanstring_unicode(pystr, idx + 1, strict, &next_idx);
+@@ -1043,7 +1043,7 @@
+ /* skip whitespace between key and : delimiter, read :, skip whitespace */
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+ if (idx > end_idx || str[idx] != ':') {
+- raise_errmsg("Expecting : delimiter", pystr, idx);
++ raise_errmsg("Expecting ':' delimiter", pystr, idx);
+ goto bail;
+ }
+ idx++;
+@@ -1075,7 +1075,7 @@
+ break;
+ }
+ else if (str[idx] != ',') {
+- raise_errmsg("Expecting , delimiter", pystr, idx);
++ raise_errmsg("Expecting ',' delimiter", pystr, idx);
+ goto bail;
+ }
+ idx++;
+@@ -1236,7 +1236,7 @@
+ break;
+ }
+ else if (str[idx] != ',') {
+- raise_errmsg("Expecting , delimiter", pystr, idx);
++ raise_errmsg("Expecting ',' delimiter", pystr, idx);
+ goto bail;
+ }
+ idx++;
+diff -r 70274d53c1dd Modules/_multiprocessing/socket_connection.c
+--- a/Modules/_multiprocessing/socket_connection.c
++++ b/Modules/_multiprocessing/socket_connection.c
+@@ -117,7 +117,7 @@
+ conn_recv_string(ConnectionObject *conn, char *buffer,
+ size_t buflength, char **newbuffer, size_t maxlength)
+ {
+- int res;
++ Py_ssize_t res;
+ UINT32 ulength;
+
+ *newbuffer = NULL;
+@@ -132,20 +132,23 @@
+ if (ulength > maxlength)
+ return MP_BAD_MESSAGE_LENGTH;
+
+- if (ulength <= buflength) {
+- Py_BEGIN_ALLOW_THREADS
+- res = _conn_recvall(conn->handle, buffer, (size_t)ulength);
+- Py_END_ALLOW_THREADS
+- return res < 0 ? res : ulength;
+- } else {
+- *newbuffer = PyMem_Malloc((size_t)ulength);
+- if (*newbuffer == NULL)
++ if (ulength > buflength) {
++ *newbuffer = buffer = PyMem_Malloc((size_t)ulength);
++ if (buffer == NULL)
+ return MP_MEMORY_ERROR;
+- Py_BEGIN_ALLOW_THREADS
+- res = _conn_recvall(conn->handle, *newbuffer, (size_t)ulength);
+- Py_END_ALLOW_THREADS
+- return res < 0 ? (Py_ssize_t)res : (Py_ssize_t)ulength;
+ }
++
++ Py_BEGIN_ALLOW_THREADS
++ res = _conn_recvall(conn->handle, buffer, (size_t)ulength);
++ Py_END_ALLOW_THREADS
++
++ if (res >= 0) {
++ res = (Py_ssize_t)ulength;
++ } else if (*newbuffer != NULL) {
++ PyMem_Free(*newbuffer);
++ *newbuffer = NULL;
++ }
++ return res;
+ }
+
+ /*
+diff -r 70274d53c1dd Modules/_multiprocessing/win32_functions.c
+--- a/Modules/_multiprocessing/win32_functions.c
++++ b/Modules/_multiprocessing/win32_functions.c
+@@ -244,6 +244,7 @@
+ Py_INCREF(&Win32Type);
+
+ WIN32_CONSTANT(F_DWORD, ERROR_ALREADY_EXISTS);
++ WIN32_CONSTANT(F_DWORD, ERROR_NO_DATA);
+ WIN32_CONSTANT(F_DWORD, ERROR_PIPE_BUSY);
+ WIN32_CONSTANT(F_DWORD, ERROR_PIPE_CONNECTED);
+ WIN32_CONSTANT(F_DWORD, ERROR_SEM_TIMEOUT);
+diff -r 70274d53c1dd Modules/_randommodule.c
+--- a/Modules/_randommodule.c
++++ b/Modules/_randommodule.c
+@@ -400,7 +400,7 @@
+ long i, j;
+ PyObject *iobj;
+ PyObject *remobj;
+- unsigned long *mt, tmp;
++ unsigned long *mt, tmp, nonzero;
+
+ if (!PyInt_Check(n) && !PyLong_Check(n)) {
+ PyErr_Format(PyExc_TypeError, "jumpahead requires an "
+@@ -427,8 +427,23 @@
+ mt[j] = tmp;
+ }
+
+- for (i = 0; i < N; i++)
++ nonzero = 0;
++ for (i = 1; i < N; i++) {
+ mt[i] += i+1;
++ mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
++ nonzero |= mt[i];
++ }
++
++ /* Ensure the state is nonzero: in the unlikely event that mt[1] through
++ mt[N-1] are all zero, set the MSB of mt[0] (see issue #14591). In the
++ normal case, we fall back to the pre-issue 14591 behaviour for mt[0]. */
++ if (nonzero) {
++ mt[0] += 1;
++ mt[0] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
++ }
++ else {
++ mt[0] = 0x80000000UL;
++ }
+
+ self->index = N;
+ Py_INCREF(Py_None);
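
The extra bookkeeping above guards against jumpahead() leaving the generator with an all-zero state vector: the Mersenne Twister recurrence is linear over GF(2), so an all-zero state only ever reproduces itself and would emit nothing but zeros. The core of the fix, pulled out of the hunk as a stand-alone helper (the function name is made up, not part of the patch):

    #include <stddef.h>

    /* Return 1 if the state had to be repaired, 0 if it was already usable. */
    static int
    ensure_nonzero_state(unsigned long mt[], size_t n)
    {
        unsigned long any = 0;
        size_t i;
        for (i = 1; i < n; i++)
            any |= mt[i];              /* one pass: OR of every word after mt[0] */
        if (any)
            return 0;
        mt[0] = 0x80000000UL;          /* set one bit so the state is nonzero */
        return 1;
    }
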
+diff -r 70274d53c1dd Modules/_sqlite/connection.c
+--- a/Modules/_sqlite/connection.c
++++ b/Modules/_sqlite/connection.c
+@@ -549,7 +549,7 @@
+ } else if (py_val == Py_None) {
+ sqlite3_result_null(context);
+ } else if (PyInt_Check(py_val)) {
+- sqlite3_result_int64(context, (sqlite3_int64)PyInt_AsLong(py_val));
++ sqlite3_result_int64(context, (sqlite_int64)PyInt_AsLong(py_val));
+ } else if (PyLong_Check(py_val)) {
+ sqlite3_result_int64(context, PyLong_AsLongLong(py_val));
+ } else if (PyFloat_Check(py_val)) {
+@@ -580,7 +580,7 @@
+ sqlite3_value* cur_value;
+ PyObject* cur_py_value;
+ const char* val_str;
+- sqlite3_int64 val_int;
++ sqlite_int64 val_int;
+ Py_ssize_t buflen;
+ void* raw_buffer;
+
+diff -r 70274d53c1dd Modules/_struct.c
+--- a/Modules/_struct.c
++++ b/Modules/_struct.c
+@@ -1693,6 +1693,18 @@
+ return PyInt_FromSsize_t(self->s_size);
+ }
+
++PyDoc_STRVAR(s_sizeof__doc__,
++"S.__sizeof__() -> size of S in memory, in bytes");
++
++static PyObject *
++s_sizeof(PyStructObject *self, void *unused)
++{
++ Py_ssize_t size;
++
++ size = sizeof(PyStructObject) + sizeof(formatcode) * (self->s_len + 1);
++ return PyLong_FromSsize_t(size);
++}
++
+ /* List of functions */
+
+ static struct PyMethodDef s_methods[] = {
+@@ -1701,6 +1713,7 @@
+ {"unpack", s_unpack, METH_O, s_unpack__doc__},
+ {"unpack_from", (PyCFunction)s_unpack_from, METH_VARARGS|METH_KEYWORDS,
+ s_unpack_from__doc__},
++ {"__sizeof__", (PyCFunction)s_sizeof, METH_NOARGS, s_sizeof__doc__},
+ {NULL, NULL} /* sentinel */
+ };
+
+diff -r 70274d53c1dd Modules/arraymodule.c
+--- a/Modules/arraymodule.c
++++ b/Modules/arraymodule.c
+@@ -1533,6 +1533,19 @@
+ PyDoc_STRVAR(reduce_doc, "Return state information for pickling.");
+
+ static PyObject *
++array_sizeof(arrayobject *self, PyObject *unused)
++{
++ Py_ssize_t res;
++ res = sizeof(arrayobject) + self->allocated * self->ob_descr->itemsize;
++ return PyLong_FromSsize_t(res);
++}
++
++PyDoc_STRVAR(sizeof_doc,
++"__sizeof__() -> int\n\
++\n\
++Size of the array in memory, in bytes.");
++
++static PyObject *
+ array_get_typecode(arrayobject *a, void *closure)
+ {
+ char tc = a->ob_descr->typecode;
+@@ -1606,6 +1619,8 @@
+ #endif
+ {"write", (PyCFunction)array_tofile_as_write, METH_O,
+ tofile_doc},
++ {"__sizeof__", (PyCFunction)array_sizeof, METH_NOARGS,
++ sizeof_doc},
+ {NULL, NULL} /* sentinel */
+ };
+
+diff -r 70274d53c1dd Modules/errnomodule.c
+--- a/Modules/errnomodule.c
++++ b/Modules/errnomodule.c
+@@ -783,6 +783,9 @@
+ #ifdef WSAN
+ inscode(d, ds, de, "WSAN", WSAN, "Error WSAN");
+ #endif
++#ifdef ENOTSUP
++ inscode(d, ds, de, "ENOTSUP", ENOTSUP, "Operation not supported");
++#endif
+
+ Py_DECREF(de);
+ }
+diff -r 70274d53c1dd Modules/gcmodule.c
+--- a/Modules/gcmodule.c
++++ b/Modules/gcmodule.c
+@@ -111,6 +111,46 @@
+ http://mail.python.org/pipermail/python-dev/2008-June/080579.html
+ */
+
++/*
++ NOTE: about untracking of mutable objects.
++
++ Certain types of container cannot participate in a reference cycle, and
++ so do not need to be tracked by the garbage collector. Untracking these
++ objects reduces the cost of garbage collections. However, determining
++ which objects may be untracked is not free, and the costs must be
++ weighed against the benefits for garbage collection.
++
++ There are two possible strategies for when to untrack a container:
++
++ i) When the container is created.
++ ii) When the container is examined by the garbage collector.
++
++ Tuples containing only immutable objects (integers, strings etc, and
++ recursively, tuples of immutable objects) do not need to be tracked.
++ The interpreter creates a large number of tuples, many of which will
++ not survive until garbage collection. It is therefore not worthwhile
++ to untrack eligible tuples at creation time.
++
++ Instead, all tuples except the empty tuple are tracked when created.
++ During garbage collection it is determined whether any surviving tuples
++ can be untracked. A tuple can be untracked if all of its contents are
++ already not tracked. Tuples are examined for untracking in all garbage
++ collection cycles. It may take more than one cycle to untrack a tuple.
++
++ Dictionaries containing only immutable objects also do not need to be
++ tracked. Dictionaries are untracked when created. If a tracked item is
++ inserted into a dictionary (either as a key or value), the dictionary
++ becomes tracked. During a full garbage collection (all generations),
++ the collector will untrack any dictionaries whose contents are not
++ tracked.
++
++ The module provides the python function is_tracked(obj), which returns
++ the CURRENT tracking status of the object. Subsequent garbage
++ collections may change the tracking status of the object.
++
++ Untracking of certain containers was introduced in issue #4688, and
++ the algorithm was refined in response to issue #14775.
++*/
+
+ /* set for debugging information */
+ #define DEBUG_STATS (1<<0) /* print collection statistics */
+@@ -436,9 +476,6 @@
+ if (PyTuple_CheckExact(op)) {
+ _PyTuple_MaybeUntrack(op);
+ }
+- else if (PyDict_CheckExact(op)) {
+- _PyDict_MaybeUntrack(op);
+- }
+ }
+ else {
+ /* This *may* be unreachable. To make progress,
+@@ -478,6 +515,20 @@
+ return 0;
+ }
+
++/* Try to untrack all currently tracked dictionaries */
++static void
++untrack_dicts(PyGC_Head *head)
++{
++ PyGC_Head *next, *gc = head->gc.gc_next;
++ while (gc != head) {
++ PyObject *op = FROM_GC(gc);
++ next = gc->gc.gc_next;
++ if (PyDict_CheckExact(op))
++ _PyDict_MaybeUntrack(op);
++ gc = next;
++ }
++}
++
+ /* Move the objects in unreachable with __del__ methods into `finalizers`.
+ * Objects moved into `finalizers` have gc_refs set to GC_REACHABLE; the
+ * objects remaining in unreachable are left at GC_TENTATIVELY_UNREACHABLE.
+@@ -890,6 +941,9 @@
+ gc_list_merge(young, old);
+ }
+ else {
++ /* We only untrack dicts in full collections, to avoid quadratic
++ dict build-up. See issue #14775. */
++ untrack_dicts(young);
+ long_lived_pending = 0;
+ long_lived_total = gc_list_size(young);
+ }
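
The long comment added above describes when tuples and dicts are dropped from the collector; the observable effect can be seen through gc.is_tracked(). A rough embedded-interpreter sketch, not part of the patch and hedged on typical behaviour (a freshly built tuple of small ints is usually tracked at creation and untracked after a collection):

    #include <Python.h>

    int main(void)
    {
        Py_Initialize();
        PyRun_SimpleString(
            "import gc\n"
            "t = tuple([1, 2, 3])\n"
            "print gc.is_tracked(t)\n"    /* typically True right after creation */
            "gc.collect()\n"
            "print gc.is_tracked(t)\n"    /* typically False after a collection  */
        );
        Py_Finalize();
        return 0;
    }
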
+diff -r 70274d53c1dd Modules/getaddrinfo.c
+--- a/Modules/getaddrinfo.c
++++ b/Modules/getaddrinfo.c
+@@ -430,7 +430,7 @@
+ break;
+ #ifdef ENABLE_IPV6
+ case AF_INET6:
+- pfx = ((struct in6_addr *)pton)->s6_addr8[0];
++ pfx = ((struct in6_addr *)pton)->s6_addr[0];
+ if (pfx == 0 || pfx == 0xfe || pfx == 0xff)
+ pai->ai_flags &= ~AI_CANONNAME;
+ break;
+diff -r 70274d53c1dd Modules/getnameinfo.c
+--- a/Modules/getnameinfo.c
++++ b/Modules/getnameinfo.c
+@@ -161,7 +161,7 @@
+ break;
+ #ifdef ENABLE_IPV6
+ case AF_INET6:
+- pfx = ((struct sockaddr_in6 *)sa)->sin6_addr.s6_addr8[0];
++ pfx = ((struct sockaddr_in6 *)sa)->sin6_addr.s6_addr[0];
+ if (pfx == 0 || pfx == 0xfe || pfx == 0xff)
+ flags |= NI_NUMERICHOST;
+ break;
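
The member rename in the two hunks above is a portability fix: s6_addr is the byte-array member of struct in6_addr that POSIX specifies, whereas s6_addr8 is a non-standard alias that only some platforms provide. A minimal sketch of the portable access (not part of the patch):

    #include <netinet/in.h>

    /* First (highest-order) byte of an IPv6 address, using only the
       standard s6_addr member. */
    static unsigned char
    first_addr_byte(const struct in6_addr *a)
    {
        return a->s6_addr[0];
    }
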
+diff -r 70274d53c1dd Modules/main.c
+--- a/Modules/main.c
++++ b/Modules/main.c
+@@ -583,7 +583,7 @@
+ sts = PyRun_SimpleStringFlags(command, &cf) != 0;
+ free(command);
+ } else if (module) {
+- sts = RunModule(module, 1);
++ sts = (RunModule(module, 1) != 0);
+ free(module);
+ }
+ else {
+diff -r 70274d53c1dd Modules/md5module.c
+--- a/Modules/md5module.c
++++ b/Modules/md5module.c
+@@ -262,6 +262,8 @@
+ {
+ md5object *md5p;
+ Py_buffer view = { 0 };
++ Py_ssize_t n;
++ unsigned char *buf;
+
+ if (!PyArg_ParseTuple(args, "|s*:new", &view))
+ return NULL;
+@@ -271,9 +273,18 @@
+ return NULL;
+ }
+
+- if (view.len > 0) {
+- md5_append(&md5p->md5, (unsigned char*)view.buf,
+- Py_SAFE_DOWNCAST(view.len, Py_ssize_t, unsigned int));
++ n = view.len;
++ buf = (unsigned char *) view.buf;
++ while (n > 0) {
++ Py_ssize_t nbytes;
++ if (n > INT_MAX)
++ nbytes = INT_MAX;
++ else
++ nbytes = n;
++ md5_append(&md5p->md5, buf,
++ Py_SAFE_DOWNCAST(nbytes, Py_ssize_t, unsigned int));
++ buf += nbytes;
++ n -= nbytes;
+ }
+ PyBuffer_Release(&view);
+
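
The loop added above exists because the length handed to md5_append() is downcast to an unsigned int, while the caller's buffer is measured in Py_ssize_t and may exceed INT_MAX on 64-bit builds; feeding it in slices of at most INT_MAX bytes keeps every individual call in range. A stand-alone sketch of the same chunking pattern (the callback name is made up, not part of the patch):

    #include <limits.h>
    #include <stddef.h>

    /* Feed an arbitrarily large buffer to an API that only accepts an
       'unsigned int' length, at most INT_MAX bytes per call. */
    static void
    feed_in_chunks(const unsigned char *buf, size_t n,
                   void (*consume)(const unsigned char *, unsigned int))
    {
        while (n > 0) {
            unsigned int nbytes = (n > (size_t)INT_MAX) ? (unsigned int)INT_MAX
                                                        : (unsigned int)n;
            consume(buf, nbytes);
            buf += nbytes;
            n -= nbytes;
        }
    }
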
+diff -r 70274d53c1dd Modules/parsermodule.c
+--- a/Modules/parsermodule.c
++++ b/Modules/parsermodule.c
+@@ -169,9 +169,33 @@
+
+
+ static void parser_free(PyST_Object *st);
++static PyObject* parser_sizeof(PyST_Object *, void *);
+ static int parser_compare(PyST_Object *left, PyST_Object *right);
+ static PyObject *parser_getattr(PyObject *self, char *name);
+-
++static PyObject* parser_compilest(PyST_Object *, PyObject *, PyObject *);
++static PyObject* parser_isexpr(PyST_Object *, PyObject *, PyObject *);
++static PyObject* parser_issuite(PyST_Object *, PyObject *, PyObject *);
++static PyObject* parser_st2list(PyST_Object *, PyObject *, PyObject *);
++static PyObject* parser_st2tuple(PyST_Object *, PyObject *, PyObject *);
++
++#define PUBLIC_METHOD_TYPE (METH_VARARGS|METH_KEYWORDS)
++
++static PyMethodDef
++parser_methods[] = {
++ {"compile", (PyCFunction)parser_compilest, PUBLIC_METHOD_TYPE,
++ PyDoc_STR("Compile this ST object into a code object.")},
++ {"isexpr", (PyCFunction)parser_isexpr, PUBLIC_METHOD_TYPE,
++ PyDoc_STR("Determines if this ST object was created from an expression.")},
++ {"issuite", (PyCFunction)parser_issuite, PUBLIC_METHOD_TYPE,
++ PyDoc_STR("Determines if this ST object was created from a suite.")},
++ {"tolist", (PyCFunction)parser_st2list, PUBLIC_METHOD_TYPE,
++ PyDoc_STR("Creates a list-tree representation of this ST.")},
++ {"totuple", (PyCFunction)parser_st2tuple, PUBLIC_METHOD_TYPE,
++ PyDoc_STR("Creates a tuple-tree representation of this ST.")},
++ {"__sizeof__", (PyCFunction)parser_sizeof, METH_NOARGS,
++ PyDoc_STR("Returns size in memory, in bytes.")},
++ {NULL, NULL, 0, NULL}
++};
+
+ static
+ PyTypeObject PyST_Type = {
+@@ -200,7 +224,14 @@
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+
+ /* __doc__ */
+- "Intermediate representation of a Python parse tree."
++ "Intermediate representation of a Python parse tree.",
++ 0, /* tp_traverse */
++ 0, /* tp_clear */
++ 0, /* tp_richcompare */
++ 0, /* tp_weaklistoffset */
++ 0, /* tp_iter */
++ 0, /* tp_iternext */
++ parser_methods, /* tp_methods */
+ }; /* PyST_Type */
+
+
+@@ -494,25 +525,6 @@
+ }
+
+
+-#define PUBLIC_METHOD_TYPE (METH_VARARGS|METH_KEYWORDS)
+-
+-static PyMethodDef
+-parser_methods[] = {
+- {"compile", (PyCFunction)parser_compilest, PUBLIC_METHOD_TYPE,
+- PyDoc_STR("Compile this ST object into a code object.")},
+- {"isexpr", (PyCFunction)parser_isexpr, PUBLIC_METHOD_TYPE,
+- PyDoc_STR("Determines if this ST object was created from an expression.")},
+- {"issuite", (PyCFunction)parser_issuite, PUBLIC_METHOD_TYPE,
+- PyDoc_STR("Determines if this ST object was created from a suite.")},
+- {"tolist", (PyCFunction)parser_st2list, PUBLIC_METHOD_TYPE,
+- PyDoc_STR("Creates a list-tree representation of this ST.")},
+- {"totuple", (PyCFunction)parser_st2tuple, PUBLIC_METHOD_TYPE,
+- PyDoc_STR("Creates a tuple-tree representation of this ST.")},
+-
+- {NULL, NULL, 0, NULL}
+-};
+-
+-
+ static PyObject*
+ parser_getattr(PyObject *self, char *name)
+ {
+@@ -695,6 +707,15 @@
+ return parser_tuple2st(self, args, kw);
+ }
+
++static PyObject *
++parser_sizeof(PyST_Object *st, void *unused)
++{
++ Py_ssize_t res;
++
++ res = sizeof(PyST_Object) + _PyNode_SizeOf(st->st_node);
++ return PyLong_FromSsize_t(res);
++}
++
+
+ /* node* build_node_children()
+ *
+diff -r 70274d53c1dd Modules/socketmodule.c
+--- a/Modules/socketmodule.c
++++ b/Modules/socketmodule.c
+@@ -761,7 +761,7 @@
+ /* Lock to allow python interpreter to continue, but only allow one
+ thread to be in gethostbyname or getaddrinfo */
+ #if defined(USE_GETHOSTBYNAME_LOCK) || defined(USE_GETADDRINFO_LOCK)
+-PyThread_type_lock netdb_lock;
++static PyThread_type_lock netdb_lock;
+ #endif
+
+
+@@ -1310,7 +1310,7 @@
+ "getsockaddrarg: port must be 0-65535.");
+ return 0;
+ }
+- if (flowinfo < 0 || flowinfo > 0xfffff) {
++ if (flowinfo > 0xfffff) {
+ PyErr_SetString(
+ PyExc_OverflowError,
+ "getsockaddrarg: flowinfo must be 0-1048575.");
+@@ -4181,7 +4181,7 @@
+ if (!PyArg_ParseTuple(sa, "si|II",
+ &hostp, &port, &flowinfo, &scope_id))
+ return NULL;
+- if (flowinfo < 0 || flowinfo > 0xfffff) {
++ if (flowinfo > 0xfffff) {
+ PyErr_SetString(PyExc_OverflowError,
+ "getsockaddrarg: flowinfo must be 0-1048575.");
+ return NULL;
+diff -r 70274d53c1dd Modules/threadmodule.c
+--- a/Modules/threadmodule.c
++++ b/Modules/threadmodule.c
+@@ -618,6 +618,8 @@
+ PyErr_Clear();
+ else {
+ PyObject *file;
++ PyObject *exc, *value, *tb;
++ PyErr_Fetch(&exc, &value, &tb);
+ PySys_WriteStderr(
+ "Unhandled exception in thread started by ");
+ file = PySys_GetObject("stderr");
+@@ -626,6 +628,7 @@
+ else
+ PyObject_Print(boot->func, stderr, 0);
+ PySys_WriteStderr("\n");
++ PyErr_Restore(exc, value, tb);
+ PyErr_PrintEx(0);
+ }
+ }
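
The PyErr_Fetch()/PyErr_Restore() pair added above sets the pending exception aside while the error banner is written to stderr, so that writing the banner cannot disturb, or be blocked by, the exception that is about to be printed. A minimal sketch of the pattern (not part of the patch; the function name is made up):

    #include <Python.h>

    /* Report something on stderr without losing the exception that is
       currently set, then print that exception. */
    static void
    report_unhandled(void)
    {
        PyObject *exc, *val, *tb;
        PyErr_Fetch(&exc, &val, &tb);      /* take the pending error away safely */
        PySys_WriteStderr("Unhandled exception in thread\n");
        PyErr_Restore(exc, val, tb);       /* put it back ...                    */
        PyErr_PrintEx(0);                  /* ... so it can be printed normally  */
    }
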
+diff -r 70274d53c1dd Objects/abstract.c
+--- a/Objects/abstract.c
++++ b/Objects/abstract.c
+@@ -126,7 +126,7 @@
+ PyErr_Clear();
+ return defaultvalue;
+ }
+- rv = PyLong_Check(ro) ? PyLong_AsSsize_t(ro) : defaultvalue;
++ rv = PyNumber_Check(ro) ? PyInt_AsSsize_t(ro) : defaultvalue;
+ Py_DECREF(ro);
+ return rv;
+ }
+diff -r 70274d53c1dd Objects/bytearrayobject.c
+--- a/Objects/bytearrayobject.c
++++ b/Objects/bytearrayobject.c
+@@ -2296,8 +2296,10 @@
+ }
+
+ bytearray_obj = PyByteArray_FromStringAndSize(NULL, buf_size);
+- if (bytearray_obj == NULL)
++ if (bytearray_obj == NULL) {
++ Py_DECREF(it);
+ return NULL;
++ }
+ buf = PyByteArray_AS_STRING(bytearray_obj);
+
+ while ((item = PyIter_Next(it)) != NULL) {
+@@ -2330,8 +2332,10 @@
+ return NULL;
+ }
+
+- if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1)
++ if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1) {
++ Py_DECREF(bytearray_obj);
+ return NULL;
++ }
+ Py_DECREF(bytearray_obj);
+
+ Py_RETURN_NONE;
+@@ -2645,7 +2649,7 @@
+ }
+
+ PyDoc_STRVAR(splitlines__doc__,
+-"B.splitlines([keepends]) -> list of lines\n\
++"B.splitlines(keepends=False) -> list of lines\n\
+ \n\
+ Return a list of the lines in B, breaking at line boundaries.\n\
+ Line breaks are not included in the resulting list unless keepends\n\
+diff -r 70274d53c1dd Objects/classobject.c
+--- a/Objects/classobject.c
++++ b/Objects/classobject.c
+@@ -225,10 +225,16 @@
+ class_getattr(register PyClassObject *op, PyObject *name)
+ {
+ register PyObject *v;
+- register char *sname = PyString_AsString(name);
++ register char *sname;
+ PyClassObject *klass;
+ descrgetfunc f;
+
++ if (!PyString_Check(name)) {
++ PyErr_SetString(PyExc_TypeError, "attribute name must be a string");
++ return NULL;
++ }
++
++ sname = PyString_AsString(name);
+ if (sname[0] == '_' && sname[1] == '_') {
+ if (strcmp(sname, "__dict__") == 0) {
+ if (PyEval_GetRestricted()) {
+@@ -336,6 +342,10 @@
+ "classes are read-only in restricted mode");
+ return -1;
+ }
++ if (!PyString_Check(name)) {
++ PyErr_SetString(PyExc_TypeError, "attribute name must be a string");
++ return -1;
++ }
+ sname = PyString_AsString(name);
+ if (sname[0] == '_' && sname[1] == '_') {
+ Py_ssize_t n = PyString_Size(name);
+@@ -699,7 +709,14 @@
+ instance_getattr1(register PyInstanceObject *inst, PyObject *name)
+ {
+ register PyObject *v;
+- register char *sname = PyString_AsString(name);
++ register char *sname;
++
++ if (!PyString_Check(name)) {
++ PyErr_SetString(PyExc_TypeError, "attribute name must be a string");
++ return NULL;
++ }
++
++ sname = PyString_AsString(name);
+ if (sname[0] == '_' && sname[1] == '_') {
+ if (strcmp(sname, "__dict__") == 0) {
+ if (PyEval_GetRestricted()) {
+@@ -810,7 +827,14 @@
+ instance_setattr(PyInstanceObject *inst, PyObject *name, PyObject *v)
+ {
+ PyObject *func, *args, *res, *tmp;
+- char *sname = PyString_AsString(name);
++ char *sname;
++
++ if (!PyString_Check(name)) {
++ PyErr_SetString(PyExc_TypeError, "attribute name must be a string");
++ return -1;
++ }
++
++ sname = PyString_AsString(name);
+ if (sname[0] == '_' && sname[1] == '_') {
+ Py_ssize_t n = PyString_Size(name);
+ if (sname[n-1] == '_' && sname[n-2] == '_') {
+diff -r 70274d53c1dd Objects/descrobject.c
+--- a/Objects/descrobject.c
++++ b/Objects/descrobject.c
+@@ -254,14 +254,51 @@
+ classmethoddescr_call(PyMethodDescrObject *descr, PyObject *args,
+ PyObject *kwds)
+ {
+- PyObject *func, *result;
++ Py_ssize_t argc;
++ PyObject *self, *func, *result;
+
+- func = PyCFunction_New(descr->d_method, (PyObject *)descr->d_type);
++ /* Make sure that the first argument is acceptable as 'self' */
++ assert(PyTuple_Check(args));
++ argc = PyTuple_GET_SIZE(args);
++ if (argc < 1) {
++ PyErr_Format(PyExc_TypeError,
++ "descriptor '%s' of '%.100s' "
++ "object needs an argument",
++ descr_name((PyDescrObject *)descr),
++ descr->d_type->tp_name);
++ return NULL;
++ }
++ self = PyTuple_GET_ITEM(args, 0);
++ if (!PyType_Check(self)) {
++ PyErr_Format(PyExc_TypeError,
++ "descriptor '%s' requires a type "
++ "but received a '%.100s'",
++ descr_name((PyDescrObject *)descr),
++ self->ob_type->tp_name);
++ return NULL;
++ }
++ if (!PyType_IsSubtype((PyTypeObject *)self, descr->d_type)) {
++ PyErr_Format(PyExc_TypeError,
++ "descriptor '%s' "
++ "requires a subtype of '%.100s' "
++ "but received '%.100s",
++ descr_name((PyDescrObject *)descr),
++ descr->d_type->tp_name,
++ self->ob_type->tp_name);
++ return NULL;
++ }
++
++ func = PyCFunction_New(descr->d_method, self);
+ if (func == NULL)
+ return NULL;
+-
++ args = PyTuple_GetSlice(args, 1, argc);
++ if (args == NULL) {
++ Py_DECREF(func);
++ return NULL;
++ }
+ result = PyEval_CallObjectWithKeywords(func, args, kwds);
+ Py_DECREF(func);
++ Py_DECREF(args);
+ return result;
+ }
+
+diff -r 70274d53c1dd Objects/dictobject.c
+--- a/Objects/dictobject.c
++++ b/Objects/dictobject.c
+@@ -502,27 +502,16 @@
+ _PyObject_GC_UNTRACK(op);
+ }
+
+-
+ /*
+-Internal routine to insert a new item into the table.
+-Used both by the internal resize routine and by the public insert routine.
+-Eats a reference to key and one to value.
+-Returns -1 if an error occurred, or 0 on success.
++Internal routine to insert a new item into the table when you have entry object.
++Used by insertdict.
+ */
+ static int
+-insertdict(register PyDictObject *mp, PyObject *key, long hash, PyObject *value)
++insertdict_by_entry(register PyDictObject *mp, PyObject *key, long hash,
++ PyDictEntry *ep, PyObject *value)
+ {
+ PyObject *old_value;
+- register PyDictEntry *ep;
+- typedef PyDictEntry *(*lookupfunc)(PyDictObject *, PyObject *, long);
+-
+- assert(mp->ma_lookup != NULL);
+- ep = mp->ma_lookup(mp, key, hash);
+- if (ep == NULL) {
+- Py_DECREF(key);
+- Py_DECREF(value);
+- return -1;
+- }
++
+ MAINTAIN_TRACKING(mp, key, value);
+ if (ep->me_value != NULL) {
+ old_value = ep->me_value;
+@@ -545,6 +534,28 @@
+ return 0;
+ }
+
++
++/*
++Internal routine to insert a new item into the table.
++Used both by the internal resize routine and by the public insert routine.
++Eats a reference to key and one to value.
++Returns -1 if an error occurred, or 0 on success.
++*/
++static int
++insertdict(register PyDictObject *mp, PyObject *key, long hash, PyObject *value)
++{
++ register PyDictEntry *ep;
++
++ assert(mp->ma_lookup != NULL);
++ ep = mp->ma_lookup(mp, key, hash);
++ if (ep == NULL) {
++ Py_DECREF(key);
++ Py_DECREF(value);
++ return -1;
++ }
++ return insertdict_by_entry(mp, key, hash, ep, value);
++}
++
+ /*
+ Internal routine used by dictresize() to insert an item which is
+ known to be absent from the dict. This routine also assumes that
+@@ -738,42 +749,26 @@
+ return ep->me_value;
+ }
+
+-/* CAUTION: PyDict_SetItem() must guarantee that it won't resize the
+- * dictionary if it's merely replacing the value for an existing key.
+- * This means that it's safe to loop over a dictionary with PyDict_Next()
+- * and occasionally replace a value -- but you can't insert new keys or
+- * remove them.
+- */
+-int
+-PyDict_SetItem(register PyObject *op, PyObject *key, PyObject *value)
++static int
++dict_set_item_by_hash_or_entry(register PyObject *op, PyObject *key,
++ long hash, PyDictEntry *ep, PyObject *value)
+ {
+ register PyDictObject *mp;
+- register long hash;
+ register Py_ssize_t n_used;
+
+- if (!PyDict_Check(op)) {
+- PyErr_BadInternalCall();
+- return -1;
+- }
+- assert(key);
+- assert(value);
+ mp = (PyDictObject *)op;
+- if (PyString_CheckExact(key)) {
+- hash = ((PyStringObject *)key)->ob_shash;
+- if (hash == -1)
+- hash = PyObject_Hash(key);
+- }
+- else {
+- hash = PyObject_Hash(key);
+- if (hash == -1)
+- return -1;
+- }
+ assert(mp->ma_fill <= mp->ma_mask); /* at least one empty slot */
+ n_used = mp->ma_used;
+ Py_INCREF(value);
+ Py_INCREF(key);
+- if (insertdict(mp, key, hash, value) != 0)
+- return -1;
++ if (ep == NULL) {
++ if (insertdict(mp, key, hash, value) != 0)
++ return -1;
++ }
++ else {
++ if (insertdict_by_entry(mp, key, hash, ep, value) != 0)
++ return -1;
++ }
+ /* If we added a key, we can safely resize. Otherwise just return!
+ * If fill >= 2/3 size, adjust size. Normally, this doubles or
+ * quaduples the size, but it's also possible for the dict to shrink
+@@ -793,6 +788,36 @@
+ return dictresize(mp, (mp->ma_used > 50000 ? 2 : 4) * mp->ma_used);
+ }
+
++/* CAUTION: PyDict_SetItem() must guarantee that it won't resize the
++ * dictionary if it's merely replacing the value for an existing key.
++ * This means that it's safe to loop over a dictionary with PyDict_Next()
++ * and occasionally replace a value -- but you can't insert new keys or
++ * remove them.
++ */
++int
++PyDict_SetItem(register PyObject *op, PyObject *key, PyObject *value)
++{
++ register long hash;
++
++ if (!PyDict_Check(op)) {
++ PyErr_BadInternalCall();
++ return -1;
++ }
++ assert(key);
++ assert(value);
++ if (PyString_CheckExact(key)) {
++ hash = ((PyStringObject *)key)->ob_shash;
++ if (hash == -1)
++ hash = PyObject_Hash(key);
++ }
++ else {
++ hash = PyObject_Hash(key);
++ if (hash == -1)
++ return -1;
++ }
++ return dict_set_item_by_hash_or_entry(op, key, hash, NULL, value);
++}
++
+ int
+ PyDict_DelItem(PyObject *op, PyObject *key)
+ {
+@@ -1957,9 +1982,9 @@
+ return NULL;
+ val = ep->me_value;
+ if (val == NULL) {
+- val = failobj;
+- if (PyDict_SetItem((PyObject*)mp, key, failobj))
+- val = NULL;
++ if (dict_set_item_by_hash_or_entry((PyObject*)mp, key, hash, ep,
++ failobj) == 0)
++ val = failobj;
+ }
+ Py_XINCREF(val);
+ return val;
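
The split into insertdict_by_entry() above lets dict.setdefault() reuse the hash and the entry slot found by its initial lookup instead of hashing and probing a second time through PyDict_SetItem(). A generic sketch of that "look up once, insert through the slot you already hold" idea (all names here are made up, none of this is CPython's API):

    struct slot { const char *key; void *value; };

    /* find_slot() is a hypothetical lookup that returns either the slot where
       the key already lives or the empty slot where it would be inserted. */
    static void *
    setdefault_like(struct slot *(*find_slot)(const char *key),
                    const char *key, void *fallback)
    {
        struct slot *s = find_slot(key);   /* single hash + probe */
        if (s->value == NULL) {
            s->key = key;                  /* empty slot: fill it directly */
            s->value = fallback;
        }
        return s->value;
    }
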
+diff -r 70274d53c1dd Objects/fileobject.c
+--- a/Objects/fileobject.c
++++ b/Objects/fileobject.c
+@@ -493,9 +493,10 @@
+ PyObject *
+ PyFile_FromString(char *name, char *mode)
+ {
++ extern int fclose(FILE *);
+ PyFileObject *f;
+
+- f = (PyFileObject *)PyFile_FromFile((FILE *)NULL, name, mode, NULL);
++ f = (PyFileObject *)PyFile_FromFile((FILE *)NULL, name, mode, fclose);
+ if (f != NULL) {
+ if (open_the_file(f, name, mode) == NULL) {
+ Py_DECREF(f);
+@@ -635,11 +636,13 @@
+ static PyObject *
+ file_repr(PyFileObject *f)
+ {
++ PyObject *ret = NULL;
++ PyObject *name = NULL;
+ if (PyUnicode_Check(f->f_name)) {
+ #ifdef Py_USING_UNICODE
+- PyObject *ret = NULL;
+- PyObject *name = PyUnicode_AsUnicodeEscapeString(f->f_name);
+- const char *name_str = name ? PyString_AsString(name) : "?";
++ const char *name_str;
++ name = PyUnicode_AsUnicodeEscapeString(f->f_name);
++ name_str = name ? PyString_AsString(name) : "?";
+ ret = PyString_FromFormat("<%s file u'%s', mode '%s' at %p>",
+ f->f_fp == NULL ? "closed" : "open",
+ name_str,
+@@ -649,11 +652,16 @@
+ return ret;
+ #endif
+ } else {
+- return PyString_FromFormat("<%s file '%s', mode '%s' at %p>",
++ name = PyObject_Repr(f->f_name);
++ if (name == NULL)
++ return NULL;
++ ret = PyString_FromFormat("<%s file %s, mode '%s' at %p>",
+ f->f_fp == NULL ? "closed" : "open",
+- PyString_AsString(f->f_name),
++ PyString_AsString(name),
+ PyString_AsString(f->f_mode),
+ f);
++ Py_XDECREF(name);
++ return ret;
+ }
+ }
+
+@@ -1072,12 +1080,23 @@
+ return NULL;
+ bytesread = 0;
+ for (;;) {
++ int interrupted;
+ FILE_BEGIN_ALLOW_THREADS(f)
+ errno = 0;
+ chunksize = Py_UniversalNewlineFread(BUF(v) + bytesread,
+ buffersize - bytesread, f->f_fp, (PyObject *)f);
++ interrupted = ferror(f->f_fp) && errno == EINTR;
+ FILE_END_ALLOW_THREADS(f)
++ if (interrupted) {
++ clearerr(f->f_fp);
++ if (PyErr_CheckSignals()) {
++ Py_DECREF(v);
++ return NULL;
++ }
++ }
+ if (chunksize == 0) {
++ if (interrupted)
++ continue;
+ if (!ferror(f->f_fp))
+ break;
+ clearerr(f->f_fp);
+@@ -1092,7 +1111,7 @@
+ return NULL;
+ }
+ bytesread += chunksize;
+- if (bytesread < buffersize) {
++ if (bytesread < buffersize && !interrupted) {
+ clearerr(f->f_fp);
+ break;
+ }
+@@ -1133,12 +1152,23 @@
+ ntodo = pbuf.len;
+ ndone = 0;
+ while (ntodo > 0) {
++ int interrupted;
+ FILE_BEGIN_ALLOW_THREADS(f)
+ errno = 0;
+ nnow = Py_UniversalNewlineFread(ptr+ndone, ntodo, f->f_fp,
+ (PyObject *)f);
++ interrupted = ferror(f->f_fp) && errno == EINTR;
+ FILE_END_ALLOW_THREADS(f)
++ if (interrupted) {
++ clearerr(f->f_fp);
++ if (PyErr_CheckSignals()) {
++ PyBuffer_Release(&pbuf);
++ return NULL;
++ }
++ }
+ if (nnow == 0) {
++ if (interrupted)
++ continue;
+ if (!ferror(f->f_fp))
+ break;
+ PyErr_SetFromErrno(PyExc_IOError);
+@@ -1426,8 +1456,25 @@
+ *buf++ = c;
+ if (c == '\n') break;
+ }
+- if ( c == EOF && skipnextlf )
+- newlinetypes |= NEWLINE_CR;
++ if (c == EOF) {
++ if (ferror(fp) && errno == EINTR) {
++ FUNLOCKFILE(fp);
++ FILE_ABORT_ALLOW_THREADS(f)
++ f->f_newlinetypes = newlinetypes;
++ f->f_skipnextlf = skipnextlf;
++
++ if (PyErr_CheckSignals()) {
++ Py_DECREF(v);
++ return NULL;
++ }
++ /* We executed Python signal handlers and got no exception.
++ * Now back to reading the line where we left off. */
++ clearerr(fp);
++ continue;
++ }
++ if (skipnextlf)
++ newlinetypes |= NEWLINE_CR;
++ }
+ } else /* If not universal newlines use the normal loop */
+ while ((c = GETC(fp)) != EOF &&
+ (*buf++ = c) != '\n' &&
+@@ -1441,6 +1488,16 @@
+ break;
+ if (c == EOF) {
+ if (ferror(fp)) {
++ if (errno == EINTR) {
++ if (PyErr_CheckSignals()) {
++ Py_DECREF(v);
++ return NULL;
++ }
++ /* We executed Python signal handlers and got no exception.
++ * Now back to reading the line where we left off. */
++ clearerr(fp);
++ continue;
++ }
+ PyErr_SetFromErrno(PyExc_IOError);
+ clearerr(fp);
+ Py_DECREF(v);
+@@ -1616,7 +1673,7 @@
+ size_t totalread = 0;
+ char *p, *q, *end;
+ int err;
+- int shortread = 0;
++ int shortread = 0; /* bool, did the previous read come up short? */
+
+ if (f->f_fp == NULL)
+ return err_closed();
+@@ -1646,6 +1703,14 @@
+ sizehint = 0;
+ if (!ferror(f->f_fp))
+ break;
++ if (errno == EINTR) {
++ if (PyErr_CheckSignals()) {
++ goto error;
++ }
++ clearerr(f->f_fp);
++ shortread = 0;
++ continue;
++ }
+ PyErr_SetFromErrno(PyExc_IOError);
+ clearerr(f->f_fp);
+ goto error;
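
Each of the read loops patched above follows the same recipe when a read is cut short by a signal: clear the stream's error flag, let Python-level signal handlers run via PyErr_CheckSignals(), and if none of them raised, go back and resume reading. A stripped-down sketch of the retry shape, outside the patch (a real caller would also run signal handlers between attempts and handle partial reads):

    #include <errno.h>
    #include <stdio.h>

    /* Retry fread() when it reports nothing because it was interrupted by a
       signal (EINTR). */
    static size_t
    fread_retry(void *buf, size_t size, size_t n, FILE *fp)
    {
        for (;;) {
            size_t got = fread(buf, size, n, fp);
            if (got == 0 && ferror(fp) && errno == EINTR) {
                clearerr(fp);          /* forget the failed attempt */
                continue;              /* and try the same read again */
            }
            return got;
        }
    }
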
+diff -r 70274d53c1dd Objects/frameobject.c
+--- a/Objects/frameobject.c
++++ b/Objects/frameobject.c
+@@ -214,6 +214,7 @@
+ case SETUP_LOOP:
+ case SETUP_EXCEPT:
+ case SETUP_FINALLY:
++ case SETUP_WITH:
+ blockstack[blockstack_top++] = addr;
+ in_finally[blockstack_top-1] = 0;
+ break;
+@@ -221,7 +222,7 @@
+ case POP_BLOCK:
+ assert(blockstack_top > 0);
+ setup_op = code[blockstack[blockstack_top-1]];
+- if (setup_op == SETUP_FINALLY) {
++ if (setup_op == SETUP_FINALLY || setup_op == SETUP_WITH) {
+ in_finally[blockstack_top-1] = 1;
+ }
+ else {
+@@ -236,7 +237,7 @@
+ * be seeing such an END_FINALLY.) */
+ if (blockstack_top > 0) {
+ setup_op = code[blockstack[blockstack_top-1]];
+- if (setup_op == SETUP_FINALLY) {
++ if (setup_op == SETUP_FINALLY || setup_op == SETUP_WITH) {
+ blockstack_top--;
+ }
+ }
+@@ -298,6 +299,7 @@
+ case SETUP_LOOP:
+ case SETUP_EXCEPT:
+ case SETUP_FINALLY:
++ case SETUP_WITH:
+ delta_iblock++;
+ break;
+
+diff -r 70274d53c1dd Objects/genobject.c
+--- a/Objects/genobject.c
++++ b/Objects/genobject.c
+@@ -120,7 +120,7 @@
+ }
+
+ PyDoc_STRVAR(close_doc,
+-"close(arg) -> raise GeneratorExit inside generator.");
++"close() -> raise GeneratorExit inside generator.");
+
+ static PyObject *
+ gen_close(PyGenObject *gen, PyObject *args)
+diff -r 70274d53c1dd Objects/object.c
+--- a/Objects/object.c
++++ b/Objects/object.c
+@@ -2111,8 +2111,10 @@
+ if (PyType_Ready(&PySet_Type) < 0)
+ Py_FatalError("Can't initialize set type");
+
++#ifdef Py_USING_UNICODE
+ if (PyType_Ready(&PyUnicode_Type) < 0)
+ Py_FatalError("Can't initialize unicode type");
++#endif
+
+ if (PyType_Ready(&PySlice_Type) < 0)
+ Py_FatalError("Can't initialize slice type");
+diff -r 70274d53c1dd Objects/stringobject.c
+--- a/Objects/stringobject.c
++++ b/Objects/stringobject.c
+@@ -3545,7 +3545,7 @@
+
+
+ PyDoc_STRVAR(splitlines__doc__,
+-"S.splitlines([keepends]) -> list of strings\n\
++"S.splitlines(keepends=False) -> list of strings\n\
+ \n\
+ Return a list of the lines in S, breaking at line boundaries.\n\
+ Line breaks are not included in the resulting list unless keepends\n\
+diff -r 70274d53c1dd Objects/typeobject.c
+--- a/Objects/typeobject.c
++++ b/Objects/typeobject.c
+@@ -876,8 +876,13 @@
+ assert(base);
+ }
+
+- /* There's no need to clear the instance dict (if any);
+- the collector will call its tp_clear handler. */
++ /* Clear the instance dict (if any), to break cycles involving only
++ __dict__ slots (as in the case 'self.__dict__ is self'). */
++ if (type->tp_dictoffset != base->tp_dictoffset) {
++ PyObject **dictptr = _PyObject_GetDictPtr(self);
++ if (dictptr && *dictptr)
++ Py_CLEAR(*dictptr);
++ }
+
+ if (baseclear)
+ return baseclear(self);
+@@ -2525,6 +2530,13 @@
+ PyObject *meta_attribute, *attribute;
+ descrgetfunc meta_get;
+
++ if (!PyString_Check(name)) {
++ PyErr_Format(PyExc_TypeError,
++ "attribute name must be string, not '%.200s'",
++ name->ob_type->tp_name);
++ return NULL;
++ }
++
+ /* Initialize this type (we'll assume the metatype is initialized) */
+ if (type->tp_dict == NULL) {
+ if (PyType_Ready(type) < 0)
+@@ -2984,7 +2996,7 @@
+ unaryfunc f;
+
+ f = Py_TYPE(self)->tp_repr;
+- if (f == NULL || f == object_str)
++ if (f == NULL)
+ f = object_repr;
+ return f(self);
+ }
+@@ -3553,6 +3565,7 @@
+
+ for (; meth->ml_name != NULL; meth++) {
+ PyObject *descr;
++ int err;
+ if (PyDict_GetItemString(dict, meth->ml_name) &&
+ !(meth->ml_flags & METH_COEXIST))
+ continue;
+@@ -3576,9 +3589,10 @@
+ }
+ if (descr == NULL)
+ return -1;
+- if (PyDict_SetItemString(dict, meth->ml_name, descr) < 0)
++ err = PyDict_SetItemString(dict, meth->ml_name, descr);
++ Py_DECREF(descr);
++ if (err < 0)
+ return -1;
+- Py_DECREF(descr);
+ }
+ return 0;
+ }
+@@ -6131,7 +6145,8 @@
+ }
+ continue;
+ }
+- if (Py_TYPE(descr) == &PyWrapperDescr_Type) {
++ if (Py_TYPE(descr) == &PyWrapperDescr_Type &&
++ ((PyWrapperDescrObject *)descr)->d_base->name_strobj == p->name_strobj) {
+ void **tptr = resolve_slotdups(type, p->name_strobj);
+ if (tptr == NULL || tptr == ptr)
+ generic = p->function;
+diff -r 70274d53c1dd Objects/unicodeobject.c
+--- a/Objects/unicodeobject.c
++++ b/Objects/unicodeobject.c
+@@ -2564,7 +2564,7 @@
+ }
+
+ /* UTF-16 code pair: */
+- if (q >= e) {
++ if (e - q < 2) {
+ errmsg = "unexpected end of data";
+ startinpos = (((const char *)q)-2)-starts;
+ endinpos = ((const char *)e)-starts;
+@@ -7521,7 +7521,7 @@
+ }
+
+ PyDoc_STRVAR(splitlines__doc__,
+- "S.splitlines([keepends]) -> list of strings\n\
++ "S.splitlines(keepends=False) -> list of strings\n\
+ \n\
+ Return a list of the lines in S, breaking at line boundaries.\n\
+ Line breaks are not included in the resulting list unless keepends\n\
+@@ -7797,10 +7797,6 @@
+
+
+ static PyMethodDef unicode_methods[] = {
+-
+- /* Order is according to common usage: often used methods should
+- appear first, since lookup is done sequentially. */
+-
+ {"encode", (PyCFunction) unicode_encode, METH_VARARGS | METH_KEYWORDS, encode__doc__},
+ {"replace", (PyCFunction) unicode_replace, METH_VARARGS, replace__doc__},
+ {"split", (PyCFunction) unicode_split, METH_VARARGS, split__doc__},
+diff -r 70274d53c1dd PC/VC6/bz2.dsp
+--- a/PC/VC6/bz2.dsp
++++ b/PC/VC6/bz2.dsp
+@@ -44,7 +44,7 @@
+ # PROP Target_Dir ""
+ F90=df.exe
+ # ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "Py_BUILD_CORE_MODULE" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
+-# ADD CPP /nologo /MD /W3 /GX /Zi /O2 /I "..\..\Include" /I ".." /I "..\..\..\bzip2-1.0.5" /D "Py_BUILD_CORE_MODULE" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /YX /FD /c
++# ADD CPP /nologo /MD /W3 /GX /Zi /O2 /I "..\..\Include" /I ".." /I "..\..\..\bzip2-1.0.6" /D "Py_BUILD_CORE_MODULE" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /YX /FD /c
+ # ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+ # ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+ # ADD BASE RSC /l 0x409 /d "NDEBUG"
+@@ -54,7 +54,7 @@
+ # ADD BSC32 /nologo
+ LINK32=link.exe
+ # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+-# ADD LINK32 ..\..\..\bzip2-1.0.5\libbz2.lib /nologo /base:"0x1D170000" /subsystem:windows /dll /debug /machine:I386 /nodefaultlib:"libc" /out:"./bz2.pyd"
++# ADD LINK32 ..\..\..\bzip2-1.0.6\libbz2.lib /nologo /base:"0x1D170000" /subsystem:windows /dll /debug /machine:I386 /nodefaultlib:"libc" /out:"./bz2.pyd"
+ # SUBTRACT LINK32 /pdb:none /nodefaultlib
+
+ !ELSEIF "$(CFG)" == "bz2 - Win32 Debug"
+@@ -72,7 +72,7 @@
+ # PROP Target_Dir ""
+ F90=df.exe
+ # ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "Py_BUILD_CORE_MODULE" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
+-# ADD CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /I "..\..\Include" /I ".." /I "..\..\..\bzip2-1.0.5" /D "Py_BUILD_CORE_MODULE" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /YX /FD /c
++# ADD CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /I "..\..\Include" /I ".." /I "..\..\..\bzip2-1.0.6" /D "Py_BUILD_CORE_MODULE" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /YX /FD /c
+ # ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+ # ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+ # ADD BASE RSC /l 0x409 /d "_DEBUG"
+@@ -82,7 +82,7 @@
+ # ADD BSC32 /nologo
+ LINK32=link.exe
+ # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
+-# ADD LINK32 ..\..\..\bzip2-1.0.5\libbz2.lib /nologo /base:"0x1D170000" /subsystem:windows /dll /debug /machine:I386 /nodefaultlib:"msvcrt" /nodefaultlib:"libc" /out:"./bz2_d.pyd" /pdbtype:sept
++# ADD LINK32 ..\..\..\bzip2-1.0.6\libbz2.lib /nologo /base:"0x1D170000" /subsystem:windows /dll /debug /machine:I386 /nodefaultlib:"msvcrt" /nodefaultlib:"libc" /out:"./bz2_d.pyd" /pdbtype:sept
+ # SUBTRACT LINK32 /pdb:none
+
+ !ENDIF
+diff -r 70274d53c1dd PC/VC6/pythoncore.dsp
+--- a/PC/VC6/pythoncore.dsp
++++ b/PC/VC6/pythoncore.dsp
+@@ -663,6 +663,10 @@
+ # End Source File
+ # Begin Source File
+
++SOURCE=..\..\Python\random.c
++# End Source File
++# Begin Source File
++
+ SOURCE=..\..\Objects\rangeobject.c
+ # End Source File
+ # Begin Source File
+diff -r 70274d53c1dd PC/VC6/readme.txt
+--- a/PC/VC6/readme.txt
++++ b/PC/VC6/readme.txt
+@@ -120,14 +120,14 @@
+ Download the source from the python.org copy into the dist
+ directory:
+
+- svn export http://svn.python.org/projects/external/bzip2-1.0.5
++ svn export http://svn.python.org/projects/external/bzip2-1.0.6
+
+ And requires building bz2 first.
+
+- cd dist\bzip2-1.0.5
++ cd dist\bzip2-1.0.6
+ nmake -f makefile.msc
+
+- All of this managed to build bzip2-1.0.5\libbz2.lib, which the Python
++ All of this managed to build bzip2-1.0.6\libbz2.lib, which the Python
+ project links in.
+
+
+diff -r 70274d53c1dd PC/VS7.1/pythoncore.vcproj
+--- a/PC/VS7.1/pythoncore.vcproj
++++ b/PC/VS7.1/pythoncore.vcproj
+@@ -767,6 +767,9 @@
+ RelativePath="..\..\Python\pythonrun.c">
+ </File>
+ <File
++ RelativePath="..\..\Python\random.c">
++ </File>
++ <File
+ RelativePath="..\..\Objects\rangeobject.c">
+ </File>
+ <File
+diff -r 70274d53c1dd PC/VS8.0/bz2.vcproj
+--- a/PC/VS8.0/bz2.vcproj
++++ b/PC/VS8.0/bz2.vcproj
+@@ -532,7 +532,7 @@
+ </File>
+ </Filter>
+ <Filter
+- Name="bzip2 1.0.5 Header Files"
++ Name="bzip2 1.0.6 Header Files"
+ >
+ <File
+ RelativePath="$(bz2Dir)\bzlib.h"
+@@ -544,7 +544,7 @@
+ </File>
+ </Filter>
+ <Filter
+- Name="bzip2 1.0.5 Source Files"
++ Name="bzip2 1.0.6 Source Files"
+ >
+ <File
+ RelativePath="$(bz2Dir)\blocksort.c"
+diff -r 70274d53c1dd PC/VS8.0/pyproject.vsprops
+--- a/PC/VS8.0/pyproject.vsprops
++++ b/PC/VS8.0/pyproject.vsprops
+@@ -78,11 +78,11 @@
+ />
+ <UserMacro
+ Name="bz2Dir"
+- Value="$(externalsDir)\bzip2-1.0.5"
++ Value="$(externalsDir)\bzip2-1.0.6"
+ />
+ <UserMacro
+ Name="opensslDir"
+- Value="$(externalsDir)\openssl-0.9.8l"
++ Value="$(externalsDir)\openssl-0.9.8x"
+ />
+ <UserMacro
+ Name="tcltkDir"
+diff -r 70274d53c1dd PC/VS8.0/pythoncore.vcproj
+--- a/PC/VS8.0/pythoncore.vcproj
++++ b/PC/VS8.0/pythoncore.vcproj
+@@ -1835,6 +1835,10 @@
+ >
+ </File>
+ <File
++ RelativePath="..\..\Python\random.c"
++ >
++ </File>
++ <File
+ RelativePath="..\..\Python\structmember.c"
+ >
+ </File>
+diff -r 70274d53c1dd PC/_subprocess.c
+--- a/PC/_subprocess.c
++++ b/PC/_subprocess.c
+@@ -670,4 +670,5 @@
+ defint(d, "WAIT_OBJECT_0", WAIT_OBJECT_0);
+ defint(d, "CREATE_NEW_CONSOLE", CREATE_NEW_CONSOLE);
+ defint(d, "CREATE_NEW_PROCESS_GROUP", CREATE_NEW_PROCESS_GROUP);
++ defint(d, "STILL_ACTIVE", STILL_ACTIVE);
+ }
+diff -r 70274d53c1dd PC/pyconfig.h
+--- a/PC/pyconfig.h
++++ b/PC/pyconfig.h
+@@ -342,7 +342,7 @@
+ # define SIZEOF_FPOS_T 8
+ # define SIZEOF_HKEY 8
+ # define SIZEOF_SIZE_T 8
+-/* configure.in defines HAVE_LARGEFILE_SUPPORT iff HAVE_LONG_LONG,
++/* configure.ac defines HAVE_LARGEFILE_SUPPORT iff HAVE_LONG_LONG,
+ sizeof(off_t) > sizeof(long), and sizeof(PY_LONG_LONG) >= sizeof(off_t).
+ On Win64 the second condition is not true, but if fpos_t replaces off_t
+ then this is true. The uses of HAVE_LARGEFILE_SUPPORT imply that Win64
+diff -r 70274d53c1dd PCbuild/build_ssl.py
+--- a/PCbuild/build_ssl.py
++++ b/PCbuild/build_ssl.py
+@@ -64,37 +64,13 @@
+ print(" Please install ActivePerl and ensure it appears on your path")
+ return None
+
+-# Locate the best SSL directory given a few roots to look into.
+-def find_best_ssl_dir(sources):
+- candidates = []
+- for s in sources:
+- try:
+- # note: do not abspath s; the build will fail if any
+- # higher up directory name has spaces in it.
+- fnames = os.listdir(s)
+- except os.error:
+- fnames = []
+- for fname in fnames:
+- fqn = os.path.join(s, fname)
+- if os.path.isdir(fqn) and fname.startswith("openssl-"):
+- candidates.append(fqn)
+- # Now we have all the candidates, locate the best.
+- best_parts = []
+- best_name = None
+- for c in candidates:
+- parts = re.split("[.-]", os.path.basename(c))[1:]
+- # eg - openssl-0.9.7-beta1 - ignore all "beta" or any other qualifiers
+- if len(parts) >= 4:
+- continue
+- if parts > best_parts:
+- best_parts = parts
+- best_name = c
+- if best_name is not None:
+- print("Found an SSL directory at '%s'" % (best_name,))
+- else:
+- print("Could not find an SSL directory in '%s'" % (sources,))
+- sys.stdout.flush()
+- return best_name
++# Fetch SSL directory from VC properties
++def get_ssl_dir():
++ propfile = (os.path.join(os.path.dirname(__file__), 'pyproject.vsprops'))
++ with open(propfile) as f:
++ m = re.search('openssl-([^"]+)"', f.read())
++ return "..\..\openssl-"+m.group(1)
++
+
+ def create_makefile64(makefile, m32):
+ """Create and fix makefile for 64bit
+@@ -190,7 +166,7 @@
+ print("No Perl installation was found. Existing Makefiles are used.")
+ sys.stdout.flush()
+ # Look for SSL 2 levels up from pcbuild - ie, same place zlib etc all live.
+- ssl_dir = find_best_ssl_dir(("..\\..",))
++ ssl_dir = get_ssl_dir()
+ if ssl_dir is None:
+ sys.exit(1)
+
+diff -r 70274d53c1dd PCbuild/bz2.vcproj
+--- a/PCbuild/bz2.vcproj
++++ b/PCbuild/bz2.vcproj
+@@ -532,7 +532,7 @@
+ </File>
+ </Filter>
+ <Filter
+- Name="bzip2 1.0.5 Header Files"
++ Name="bzip2 1.0.6 Header Files"
+ >
+ <File
+ RelativePath="$(bz2Dir)\bzlib.h"
+@@ -544,7 +544,7 @@
+ </File>
+ </Filter>
+ <Filter
+- Name="bzip2 1.0.5 Source Files"
++ Name="bzip2 1.0.6 Source Files"
+ >
+ <File
+ RelativePath="$(bz2Dir)\blocksort.c"
+diff -r 70274d53c1dd PCbuild/pginstrument.vsprops
+--- a/PCbuild/pginstrument.vsprops
++++ b/PCbuild/pginstrument.vsprops
+@@ -22,7 +22,7 @@
+ <Tool
+ Name="VCLinkerTool"
+ OptimizeReferences="2"
+- EnableCOMDATFolding="2"
++ EnableCOMDATFolding="1"
+ LinkTimeCodeGeneration="2"
+ ProfileGuidedDatabase="$(SolutionDir)$(PlatformName)-pgi\$(TargetName).pgd"
+ ImportLibrary="$(OutDirPGI)\$(TargetName).lib"
+diff -r 70274d53c1dd PCbuild/pyproject.vsprops
+--- a/PCbuild/pyproject.vsprops
++++ b/PCbuild/pyproject.vsprops
+@@ -78,11 +78,11 @@
+ />
+ <UserMacro
+ Name="bz2Dir"
+- Value="$(externalsDir)\bzip2-1.0.5"
++ Value="$(externalsDir)\bzip2-1.0.6"
+ />
+ <UserMacro
+ Name="opensslDir"
+- Value="$(externalsDir)\openssl-0.9.8l"
++ Value="$(externalsDir)\openssl-0.9.8x"
+ />
+ <UserMacro
+ Name="tcltkDir"
+diff -r 70274d53c1dd PCbuild/readme.txt
+--- a/PCbuild/readme.txt
++++ b/PCbuild/readme.txt
+@@ -121,28 +121,28 @@
+ Download the source from the python.org copy into the dist
+ directory:
+
+- svn export http://svn.python.org/projects/external/bzip2-1.0.5
++ svn export http://svn.python.org/projects/external/bzip2-1.0.6
+
+ ** NOTE: if you use the Tools\buildbot\external(-amd64).bat approach for
+ obtaining external sources then you don't need to manually get the source
+ above via subversion. **
+
+ A custom pre-link step in the bz2 project settings should manage to
+- build bzip2-1.0.5\libbz2.lib by magic before bz2.pyd (or bz2_d.pyd) is
++ build bzip2-1.0.6\libbz2.lib by magic before bz2.pyd (or bz2_d.pyd) is
+ linked in PCbuild\.
+ However, the bz2 project is not smart enough to remove anything under
+- bzip2-1.0.5\ when you do a clean, so if you want to rebuild bzip2.lib
+- you need to clean up bzip2-1.0.5\ by hand.
++ bzip2-1.0.6\ when you do a clean, so if you want to rebuild bzip2.lib
++ you need to clean up bzip2-1.0.6\ by hand.
+
+ All of this managed to build libbz2.lib in
+- bzip2-1.0.5\$platform-$configuration\, which the Python project links in.
++ bzip2-1.0.6\$platform-$configuration\, which the Python project links in.
+
+ _ssl
+ Python wrapper for the secure sockets library.
+
+ Get the source code through
+
+- svn export http://svn.python.org/projects/external/openssl-0.9.8l
++ svn export http://svn.python.org/projects/external/openssl-0.9.8x
+
+ ** NOTE: if you use the Tools\buildbot\external(-amd64).bat approach for
+ obtaining external sources then you don't need to manually get the source
+diff -r 70274d53c1dd Parser/myreadline.c
+--- a/Parser/myreadline.c
++++ b/Parser/myreadline.c
+@@ -40,6 +40,10 @@
+ my_fgets(char *buf, int len, FILE *fp)
+ {
+ char *p;
++#ifdef MS_WINDOWS
++ int i;
++#endif
++
+ while (1) {
+ if (PyOS_InputHook != NULL)
+ (void)(PyOS_InputHook)();
+@@ -49,32 +53,24 @@
+ if (p != NULL)
+ return 0; /* No error */
+ #ifdef MS_WINDOWS
+- /* In the case of a Ctrl+C or some other external event
+- interrupting the operation:
+- Win2k/NT: ERROR_OPERATION_ABORTED is the most recent Win32
+- error code (and feof() returns TRUE).
+- Win9x: Ctrl+C seems to have no effect on fgets() returning
+- early - the signal handler is called, but the fgets()
+- only returns "normally" (ie, when Enter hit or feof())
++ /* Ctrl-C anywhere on the line or Ctrl-Z if the only character
++ on a line will set ERROR_OPERATION_ABORTED. Under normal
++ circumstances Ctrl-C will also have caused the SIGINT handler
++ to fire. This signal fires in another thread and is not
++ guaranteed to have occurred before this point in the code.
++
++ Therefore: check in a small loop to see if the trigger has
++ fired, in which case assume this is a Ctrl-C event. If it
++ hasn't fired within 10ms assume that this is a Ctrl-Z on its
++ own or that the signal isn't going to fire for some other
++ reason and drop through to check for EOF.
+ */
+ if (GetLastError()==ERROR_OPERATION_ABORTED) {
+- /* Signals come asynchronously, so we sleep a brief
+- moment before checking if the handler has been
+- triggered (we cant just return 1 before the
+- signal handler has been called, as the later
+- signal may be treated as a separate interrupt).
+- */
+- Sleep(1);
+- if (PyOS_InterruptOccurred()) {
+- return 1; /* Interrupt */
++ for (i = 0; i < 10; i++) {
++ if (PyOS_InterruptOccurred())
++ return 1;
++ Sleep(1);
+ }
+- /* Either the sleep wasn't long enough (need a
+- short loop retrying?) or not interrupted at all
+- (in which case we should revisit the whole thing!)
+- Logging some warning would be nice. assert is not
+- viable as under the debugger, the various dialogs
+- mean the condition is not true.
+- */
+ }
+ #endif /* MS_WINDOWS */
+ if (feof(fp)) {
+diff -r 70274d53c1dd Parser/node.c
+--- a/Parser/node.c
++++ b/Parser/node.c
+@@ -114,6 +114,7 @@
+
+ /* Forward */
+ static void freechildren(node *);
++static Py_ssize_t sizeofchildren(node *n);
+
+
+ void
+@@ -125,6 +126,16 @@
+ }
+ }
+
++Py_ssize_t
++_PyNode_SizeOf(node *n)
++{
++ Py_ssize_t res = 0;
++
++ if (n != NULL)
++ res = sizeof(node) + sizeofchildren(n);
++ return res;
++}
++
+ static void
+ freechildren(node *n)
+ {
+@@ -136,3 +147,18 @@
+ if (STR(n) != NULL)
+ PyObject_FREE(STR(n));
+ }
++
++static Py_ssize_t
++sizeofchildren(node *n)
++{
++ Py_ssize_t res = 0;
++ int i;
++ for (i = NCH(n); --i >= 0; )
++ res += sizeofchildren(CHILD(n, i));
++ if (n->n_child != NULL)
++ /* allocated size of n->n_child array */
++ res += XXXROUNDUP(NCH(n)) * sizeof(node);
++ if (STR(n) != NULL)
++ res += strlen(STR(n)) + 1;
++ return res;
++}
+diff -r 70274d53c1dd Parser/parsetok.c
+--- a/Parser/parsetok.c
++++ b/Parser/parsetok.c
+@@ -131,7 +131,7 @@
+ {
+ parser_state *ps;
+ node *n;
+- int started = 0, handling_import = 0, handling_with = 0;
++ int started = 0;
+
+ if ((ps = PyParser_New(g, start)) == NULL) {
+ fprintf(stderr, "no mem for new parser\n");
+@@ -163,7 +163,6 @@
+ }
+ if (type == ENDMARKER && started) {
+ type = NEWLINE; /* Add an extra newline */
+- handling_with = handling_import = 0;
+ started = 0;
+ /* Add the right number of dedent tokens,
+ except if a certain flag is given --
+diff -r 70274d53c1dd Python/bltinmodule.c
+--- a/Python/bltinmodule.c
++++ b/Python/bltinmodule.c
+@@ -53,8 +53,12 @@
+ PyDoc_STRVAR(import_doc,
+ "__import__(name, globals={}, locals={}, fromlist=[], level=-1) -> module\n\
+ \n\
+-Import a module. The globals are only used to determine the context;\n\
+-they are not modified. The locals are currently unused. The fromlist\n\
++Import a module. Because this function is meant for use by the Python\n\
++interpreter and not for general use it is better to use\n\
++importlib.import_module() to programmatically import a module.\n\
++\n\
++The globals argument is only used to determine the context;\n\
++they are not modified. The locals argument is unused. The fromlist\n\
+ should be a list of names to emulate ``from name import ...'', or an\n\
+ empty list to emulate ``import name''.\n\
+ When importing a module from a package, note that __import__('A.B', ...)\n\
+@@ -1578,6 +1582,7 @@
+ Py_CLEAR(str_newline);
+ return NULL;
+ }
++#ifdef Py_USING_UNICODE
+ unicode_newline = PyUnicode_FromString("\n");
+ if (unicode_newline == NULL) {
+ Py_CLEAR(str_newline);
+@@ -1591,6 +1596,7 @@
+ Py_CLEAR(unicode_space);
+ return NULL;
+ }
++#endif
+ }
+ if (!PyArg_ParseTupleAndKeywords(dummy_args, kwds, "|OOO:print",
+ kwlist, &sep, &end, &file))
+diff -r 70274d53c1dd Python/compile.c
+--- a/Python/compile.c
++++ b/Python/compile.c
+@@ -359,14 +359,31 @@
+ static PyObject *
+ dictbytype(PyObject *src, int scope_type, int flag, int offset)
+ {
+- Py_ssize_t pos = 0, i = offset, scope;
++ Py_ssize_t i = offset, scope, num_keys, key_i;
+ PyObject *k, *v, *dest = PyDict_New();
++ PyObject *sorted_keys;
+
+ assert(offset >= 0);
+ if (dest == NULL)
+ return NULL;
+
+- while (PyDict_Next(src, &pos, &k, &v)) {
++ /* Sort the keys so that we have a deterministic order on the indexes
++ saved in the returned dictionary. These indexes are used as indexes
++ into the free and cell var storage. Therefore if they aren't
++ deterministic, then the generated bytecode is not deterministic.
++ */
++ sorted_keys = PyDict_Keys(src);
++ if (sorted_keys == NULL)
++ return NULL;
++ if (PyList_Sort(sorted_keys) != 0) {
++ Py_DECREF(sorted_keys);
++ return NULL;
++ }
++ num_keys = PyList_GET_SIZE(sorted_keys);
++
++ for (key_i = 0; key_i < num_keys; key_i++) {
++ k = PyList_GET_ITEM(sorted_keys, key_i);
++ v = PyDict_GetItem(src, k);
+ /* XXX this should probably be a macro in symtable.h */
+ assert(PyInt_Check(v));
+ scope = (PyInt_AS_LONG(v) >> SCOPE_OFF) & SCOPE_MASK;
+@@ -374,12 +391,14 @@
+ if (scope == scope_type || PyInt_AS_LONG(v) & flag) {
+ PyObject *tuple, *item = PyInt_FromLong(i);
+ if (item == NULL) {
++ Py_DECREF(sorted_keys);
+ Py_DECREF(dest);
+ return NULL;
+ }
+ i++;
+ tuple = PyTuple_Pack(2, k, k->ob_type);
+ if (!tuple || PyDict_SetItem(dest, tuple, item) < 0) {
++ Py_DECREF(sorted_keys);
+ Py_DECREF(item);
+ Py_DECREF(dest);
+ Py_XDECREF(tuple);
+@@ -389,6 +408,7 @@
+ Py_DECREF(tuple);
+ }
+ }
++ Py_DECREF(sorted_keys);
+ return dest;
+ }
+
+diff -r 70274d53c1dd Python/future.c
+--- a/Python/future.c
++++ b/Python/future.c
+@@ -59,13 +59,6 @@
+ {
+ int i, found_docstring = 0, done = 0, prev_line = 0;
+
+- static PyObject *future;
+- if (!future) {
+- future = PyString_InternFromString("__future__");
+- if (!future)
+- return 0;
+- }
+-
+ if (!(mod->kind == Module_kind || mod->kind == Interactive_kind))
+ return 1;
+
+@@ -92,7 +85,9 @@
+ */
+
+ if (s->kind == ImportFrom_kind) {
+- if (s->v.ImportFrom.module == future) {
++ identifier modname = s->v.ImportFrom.module;
++ if (modname && PyString_GET_SIZE(modname) == 10 &&
++ !strcmp(PyString_AS_STRING(modname), "__future__")) {
+ if (done) {
+ PyErr_SetString(PyExc_SyntaxError,
+ ERR_LATE_FUTURE);
+diff -r 70274d53c1dd Python/getargs.c
+--- a/Python/getargs.c
++++ b/Python/getargs.c
+@@ -1410,7 +1410,7 @@
+ *errmsg = "convertible to a buffer";
+ return count;
+ }
+- PyBuffer_FillInfo(view, NULL, buf, count, 1, 0);
++ PyBuffer_FillInfo(view, arg, buf, count, 1, 0);
+ return 0;
+ }
+
+diff -r 70274d53c1dd Python/import.c
+--- a/Python/import.c
++++ b/Python/import.c
+@@ -114,6 +114,34 @@
+ };
+ #endif
+
++#ifdef MS_WINDOWS
++static int isdir(char *path) {
++ DWORD rv;
++ /* see issue1293 and issue3677:
++ * stat() on Windows doesn't recognise paths like
++ * "e:\\shared\\" and "\\\\whiterab-c2znlh\\shared" as dirs.
++ * Also reference issue6727:
++ * stat() on Windows is broken and doesn't resolve symlinks properly.
++ */
++ rv = GetFileAttributesA(path);
++ return rv != INVALID_FILE_ATTRIBUTES && rv & FILE_ATTRIBUTE_DIRECTORY;
++}
++#else
++#ifdef HAVE_STAT
++static int isdir(char *path) {
++ struct stat statbuf;
++ return stat(path, &statbuf) == 0 && S_ISDIR(statbuf.st_mode);
++}
++#else
++#ifdef RISCOS
++/* with RISCOS, isdir is in unixstuff */
++#else
++int isdir(char *path) {
++ return 0;
++}
++#endif /* RISCOS */
++#endif /* HAVE_STAT */
++#endif /* MS_WINDOWS */
+
+ /* Initialize things */
+
+@@ -968,9 +996,9 @@
+ {
+ struct stat st;
+ FILE *fpc;
+- char buf[MAXPATHLEN+1];
++ char *buf;
+ char *cpathname;
+- PyCodeObject *co;
++ PyCodeObject *co = NULL;
+ PyObject *m;
+
+ if (fstat(fileno(fp), &st) != 0) {
+@@ -987,6 +1015,10 @@
+ */
+ st.st_mtime &= 0xFFFFFFFF;
+ }
++ buf = PyMem_MALLOC(MAXPATHLEN+1);
++ if (buf == NULL) {
++ return PyErr_NoMemory();
++ }
+ cpathname = make_compiled_pathname(pathname, buf,
+ (size_t)MAXPATHLEN + 1);
+ if (cpathname != NULL &&
+@@ -994,9 +1026,9 @@
+ co = read_compiled_module(cpathname, fpc);
+ fclose(fpc);
+ if (co == NULL)
+- return NULL;
++ goto error_exit;
+ if (update_compiled_module(co, pathname) < 0)
+- return NULL;
++ goto error_exit;
+ if (Py_VerboseFlag)
+ PySys_WriteStderr("import %s # precompiled from %s\n",
+ name, cpathname);
+@@ -1005,7 +1037,7 @@
+ else {
+ co = parse_source_module(pathname, fp);
+ if (co == NULL)
+- return NULL;
++ goto error_exit;
+ if (Py_VerboseFlag)
+ PySys_WriteStderr("import %s # from %s\n",
+ name, pathname);
+@@ -1018,7 +1050,13 @@
+ m = PyImport_ExecCodeModuleEx(name, (PyObject *)co, pathname);
+ Py_DECREF(co);
+
++ PyMem_FREE(buf);
+ return m;
++
++error_exit:
++ Py_XDECREF(co);
++ PyMem_FREE(buf);
++ return NULL;
+ }
+
+
+@@ -1038,7 +1076,7 @@
+ PyObject *file = NULL;
+ PyObject *path = NULL;
+ int err;
+- char buf[MAXPATHLEN+1];
++ char *buf = NULL;
+ FILE *fp = NULL;
+ struct filedescr *fdp;
+
+@@ -1060,8 +1098,13 @@
+ err = PyDict_SetItemString(d, "__path__", path);
+ if (err != 0)
+ goto error;
++ buf = PyMem_MALLOC(MAXPATHLEN+1);
++ if (buf == NULL) {
++ PyErr_NoMemory();
++ goto error;
++ }
+ buf[0] = '\0';
+- fdp = find_module(name, "__init__", path, buf, sizeof(buf), &fp, NULL);
++ fdp = find_module(name, "__init__", path, buf, MAXPATHLEN+1, &fp, NULL);
+ if (fdp == NULL) {
+ if (PyErr_ExceptionMatches(PyExc_ImportError)) {
+ PyErr_Clear();
+@@ -1079,6 +1122,8 @@
+ error:
+ m = NULL;
+ cleanup:
++ if (buf)
++ PyMem_FREE(buf);
+ Py_XDECREF(path);
+ Py_XDECREF(file);
+ return m;
+@@ -1204,13 +1249,10 @@
+ char *filemode;
+ FILE *fp = NULL;
+ PyObject *path_hooks, *path_importer_cache;
+-#ifndef RISCOS
+- struct stat statbuf;
+-#endif
+ static struct filedescr fd_frozen = {"", "", PY_FROZEN};
+ static struct filedescr fd_builtin = {"", "", C_BUILTIN};
+ static struct filedescr fd_package = {"", "", PKG_DIRECTORY};
+- char name[MAXPATHLEN+1];
++ char *name;
+ #if defined(PYOS_OS2)
+ size_t saved_len;
+ size_t saved_namelen;
+@@ -1224,6 +1266,11 @@
+ "module name is too long");
+ return NULL;
+ }
++ name = PyMem_MALLOC(MAXPATHLEN+1);
++ if (name == NULL) {
++ PyErr_NoMemory();
++ return NULL;
++ }
+ strcpy(name, subname);
+
+ /* sys.meta_path import hook */
+@@ -1235,7 +1282,7 @@
+ PyErr_SetString(PyExc_RuntimeError,
+ "sys.meta_path must be a list of "
+ "import hooks");
+- return NULL;
++ goto error_exit;
+ }
+ Py_INCREF(meta_path); /* zap guard */
+ npath = PyList_Size(meta_path);
+@@ -1248,12 +1295,13 @@
+ path : Py_None);
+ if (loader == NULL) {
+ Py_DECREF(meta_path);
+- return NULL; /* true error */
++ goto error_exit; /* true error */
+ }
+ if (loader != Py_None) {
+ /* a loader was found */
+ *p_loader = loader;
+ Py_DECREF(meta_path);
++ PyMem_FREE(name);
+ return &importhookdescr;
+ }
+ Py_DECREF(loader);
+@@ -1267,7 +1315,7 @@
+ if (PyString_Size(path) + 1 + strlen(name) >= (size_t)buflen) {
+ PyErr_SetString(PyExc_ImportError,
+ "full frozen module name too long");
+- return NULL;
++ goto error_exit;
+ }
+ strcpy(buf, PyString_AsString(path));
+ strcat(buf, ".");
+@@ -1275,19 +1323,22 @@
+ strcpy(name, buf);
+ if (find_frozen(name) != NULL) {
+ strcpy(buf, name);
++ PyMem_FREE(name);
+ return &fd_frozen;
+ }
+ PyErr_Format(PyExc_ImportError,
+ "No frozen submodule named %.200s", name);
+- return NULL;
++ goto error_exit;
+ }
+ if (path == NULL) {
+ if (is_builtin(name)) {
+ strcpy(buf, name);
++ PyMem_FREE(name);
+ return &fd_builtin;
+ }
+ if ((find_frozen(name)) != NULL) {
+ strcpy(buf, name);
++ PyMem_FREE(name);
+ return &fd_frozen;
+ }
+
+@@ -1295,6 +1346,7 @@
+ fp = PyWin_FindRegisteredModule(name, &fdp, buf, buflen);
+ if (fp != NULL) {
+ *p_fp = fp;
++ PyMem_FREE(name);
+ return fdp;
+ }
+ #endif
+@@ -1303,7 +1355,7 @@
+ if (path == NULL || !PyList_Check(path)) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "sys.path must be a list of directory names");
+- return NULL;
++ goto error_exit;
+ }
+
+ path_hooks = PySys_GetObject("path_hooks");
+@@ -1311,14 +1363,14 @@
+ PyErr_SetString(PyExc_RuntimeError,
+ "sys.path_hooks must be a list of "
+ "import hooks");
+- return NULL;
++ goto error_exit;
+ }
+ path_importer_cache = PySys_GetObject("path_importer_cache");
+ if (path_importer_cache == NULL ||
+ !PyDict_Check(path_importer_cache)) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "sys.path_importer_cache must be a dict");
+- return NULL;
++ goto error_exit;
+ }
+
+ npath = PyList_Size(path);
+@@ -1327,13 +1379,13 @@
+ PyObject *copy = NULL;
+ PyObject *v = PyList_GetItem(path, i);
+ if (!v)
+- return NULL;
++ goto error_exit;
+ #ifdef Py_USING_UNICODE
+ if (PyUnicode_Check(v)) {
+ copy = PyUnicode_Encode(PyUnicode_AS_UNICODE(v),
+ PyUnicode_GET_SIZE(v), Py_FileSystemDefaultEncoding, NULL);
+ if (copy == NULL)
+- return NULL;
++ goto error_exit;
+ v = copy;
+ }
+ else
+@@ -1359,7 +1411,7 @@
+ path_hooks, v);
+ if (importer == NULL) {
+ Py_XDECREF(copy);
+- return NULL;
++ goto error_exit;
+ }
+ /* Note: importer is a borrowed reference */
+ if (importer != Py_None) {
+@@ -1369,10 +1421,11 @@
+ "s", fullname);
+ Py_XDECREF(copy);
+ if (loader == NULL)
+- return NULL; /* error */
++ goto error_exit; /* error */
+ if (loader != Py_None) {
+ /* a loader was found */
+ *p_loader = loader;
++ PyMem_FREE(name);
+ return &importhookdescr;
+ }
+ Py_DECREF(loader);
+@@ -1392,12 +1445,11 @@
+
+ /* Check for package import (buf holds a directory name,
+ and there's an __init__ module in that directory */
+-#ifdef HAVE_STAT
+- if (stat(buf, &statbuf) == 0 && /* it exists */
+- S_ISDIR(statbuf.st_mode) && /* it's a directory */
++ if (isdir(buf) && /* it's an existing directory */
+ case_ok(buf, len, namelen, name)) { /* case matches */
+ if (find_init_module(buf)) { /* and has __init__.py */
+ Py_XDECREF(copy);
++ PyMem_FREE(name);
+ return &fd_package;
+ }
+ else {
+@@ -1408,32 +1460,10 @@
+ if (PyErr_Warn(PyExc_ImportWarning,
+ warnstr)) {
+ Py_XDECREF(copy);
+- return NULL;
++ goto error_exit;
+ }
+ }
+ }
+-#else
+- /* XXX How are you going to test for directories? */
+-#ifdef RISCOS
+- if (isdir(buf) &&
+- case_ok(buf, len, namelen, name)) {
+- if (find_init_module(buf)) {
+- Py_XDECREF(copy);
+- return &fd_package;
+- }
+- else {
+- char warnstr[MAXPATHLEN+80];
+- sprintf(warnstr, "Not importing directory "
+- "'%.*s': missing __init__.py",
+- MAXPATHLEN, buf);
+- if (PyErr_Warn(PyExc_ImportWarning,
+- warnstr)) {
+- Py_XDECREF(copy);
+- return NULL;
+- }
+- }
+-#endif
+-#endif
+ #if defined(PYOS_OS2)
+ /* take a snapshot of the module spec for restoration
+ * after the 8 character DLL hackery
+@@ -1505,10 +1535,15 @@
+ if (fp == NULL) {
+ PyErr_Format(PyExc_ImportError,
+ "No module named %.200s", name);
+- return NULL;
++ goto error_exit;
+ }
+ *p_fp = fp;
++ PyMem_FREE(name);
+ return fdp;
++
++error_exit:
++ PyMem_FREE(name);
++ return NULL;
+ }
+
+ /* Helpers for main.c
+@@ -2115,7 +2150,7 @@
+ import_module_level(char *name, PyObject *globals, PyObject *locals,
+ PyObject *fromlist, int level)
+ {
+- char buf[MAXPATHLEN+1];
++ char *buf;
+ Py_ssize_t buflen = 0;
+ PyObject *parent, *head, *next, *tail;
+
+@@ -2129,14 +2164,18 @@
+ return NULL;
+ }
+
++ buf = PyMem_MALLOC(MAXPATHLEN+1);
++ if (buf == NULL) {
++ return PyErr_NoMemory();
++ }
+ parent = get_parent(globals, buf, &buflen, level);
+ if (parent == NULL)
+- return NULL;
++ goto error_exit;
+
+ head = load_next(parent, level < 0 ? Py_None : parent, &name, buf,
+ &buflen);
+ if (head == NULL)
+- return NULL;
++ goto error_exit;
+
+ tail = head;
+ Py_INCREF(tail);
+@@ -2145,7 +2184,7 @@
+ Py_DECREF(tail);
+ if (next == NULL) {
+ Py_DECREF(head);
+- return NULL;
++ goto error_exit;
+ }
+ tail = next;
+ }
+@@ -2157,7 +2196,7 @@
+ Py_DECREF(head);
+ PyErr_SetString(PyExc_ValueError,
+ "Empty module name");
+- return NULL;
++ goto error_exit;
+ }
+
+ if (fromlist != NULL) {
+@@ -2167,16 +2206,22 @@
+
+ if (fromlist == NULL) {
+ Py_DECREF(tail);
++ PyMem_FREE(buf);
+ return head;
+ }
+
+ Py_DECREF(head);
+ if (!ensure_fromlist(tail, fromlist, buf, buflen, 0)) {
+ Py_DECREF(tail);
+- return NULL;
++ goto error_exit;
+ }
+
++ PyMem_FREE(buf);
+ return tail;
++
++error_exit:
++ PyMem_FREE(buf);
++ return NULL;
+ }
+
+ PyObject *
+@@ -2566,7 +2611,7 @@
+ }
+ else {
+ PyObject *path, *loader = NULL;
+- char buf[MAXPATHLEN+1];
++ char *buf;
+ struct filedescr *fdp;
+ FILE *fp = NULL;
+
+@@ -2581,11 +2626,16 @@
+ }
+ }
+
++ buf = PyMem_MALLOC(MAXPATHLEN+1);
++ if (buf == NULL) {
++ return PyErr_NoMemory();
++ }
+ buf[0] = '\0';
+ fdp = find_module(fullname, subname, path, buf, MAXPATHLEN+1,
+ &fp, &loader);
+ Py_XDECREF(path);
+ if (fdp == NULL) {
++ PyMem_FREE(buf);
+ if (!PyErr_ExceptionMatches(PyExc_ImportError))
+ return NULL;
+ PyErr_Clear();
+@@ -2600,6 +2650,7 @@
+ Py_XDECREF(m);
+ m = NULL;
+ }
++ PyMem_FREE(buf);
+ }
+
+ return m;
+@@ -2617,7 +2668,7 @@
+ PyObject *modules = PyImport_GetModuleDict();
+ PyObject *path = NULL, *loader = NULL, *existing_m = NULL;
+ char *name, *subname;
+- char buf[MAXPATHLEN+1];
++ char *buf;
+ struct filedescr *fdp;
+ FILE *fp = NULL;
+ PyObject *newm;
+@@ -2677,6 +2728,11 @@
+ if (path == NULL)
+ PyErr_Clear();
+ }
++ buf = PyMem_MALLOC(MAXPATHLEN+1);
++ if (buf == NULL) {
++ Py_XDECREF(path);
++ return PyErr_NoMemory();
++ }
+ buf[0] = '\0';
+ fdp = find_module(name, subname, path, buf, MAXPATHLEN+1, &fp, &loader);
+ Py_XDECREF(path);
+@@ -2684,6 +2740,7 @@
+ if (fdp == NULL) {
+ Py_XDECREF(loader);
+ imp_modules_reloading_clear();
++ PyMem_FREE(buf);
+ return NULL;
+ }
+
+@@ -2701,6 +2758,7 @@
+ PyDict_SetItemString(modules, name, m);
+ }
+ imp_modules_reloading_clear();
++ PyMem_FREE(buf);
+ return newm;
+ }
+
+@@ -2831,19 +2889,27 @@
+ extern int fclose(FILE *);
+ PyObject *fob, *ret;
+ struct filedescr *fdp;
+- char pathname[MAXPATHLEN+1];
++ char *pathname;
+ FILE *fp = NULL;
+
++ pathname = PyMem_MALLOC(MAXPATHLEN+1);
++ if (pathname == NULL) {
++ return PyErr_NoMemory();
++ }
+ pathname[0] = '\0';
+ if (path == Py_None)
+ path = NULL;
+ fdp = find_module(NULL, name, path, pathname, MAXPATHLEN+1, &fp, NULL);
+- if (fdp == NULL)
++ if (fdp == NULL) {
++ PyMem_FREE(pathname);
+ return NULL;
++ }
+ if (fp != NULL) {
+ fob = PyFile_FromFile(fp, pathname, fdp->mode, fclose);
+- if (fob == NULL)
++ if (fob == NULL) {
++ PyMem_FREE(pathname);
+ return NULL;
++ }
+ }
+ else {
+ fob = Py_None;
+@@ -2852,6 +2918,7 @@
+ ret = Py_BuildValue("Os(ssi)",
+ fob, pathname, fdp->suffix, fdp->mode, fdp->type);
+ Py_DECREF(fob);
++ PyMem_FREE(pathname);
+ return ret;
+ }
+
+@@ -3199,49 +3266,11 @@
+ PyErr_SetString(PyExc_ImportError, "empty pathname");
+ return -1;
+ } else {
+-#ifndef RISCOS
+-#ifndef MS_WINDOWS
+- struct stat statbuf;
+- int rv;
+-
+- rv = stat(path, &statbuf);
+- if (rv == 0) {
+- /* it exists */
+- if (S_ISDIR(statbuf.st_mode)) {
+- /* it's a directory */
+- PyErr_SetString(PyExc_ImportError,
+- "existing directory");
+- return -1;
+- }
++ if(isdir(path)) {
++ PyErr_SetString(PyExc_ImportError,
++ "existing directory");
++ return -1;
+ }
+-#else /* MS_WINDOWS */
+- DWORD rv;
+- /* see issue1293 and issue3677:
+- * stat() on Windows doesn't recognise paths like
+- * "e:\\shared\\" and "\\\\whiterab-c2znlh\\shared" as dirs.
+- */
+- rv = GetFileAttributesA(path);
+- if (rv != INVALID_FILE_ATTRIBUTES) {
+- /* it exists */
+- if (rv & FILE_ATTRIBUTE_DIRECTORY) {
+- /* it's a directory */
+- PyErr_SetString(PyExc_ImportError,
+- "existing directory");
+- return -1;
+- }
+- }
+-#endif
+-#else /* RISCOS */
+- if (object_exists(path)) {
+- /* it exists */
+- if (isdir(path)) {
+- /* it's a directory */
+- PyErr_SetString(PyExc_ImportError,
+- "existing directory");
+- return -1;
+- }
+- }
+-#endif
+ }
+ return 0;
+ }
+diff -r 70274d53c1dd Python/peephole.c
+--- a/Python/peephole.c
++++ b/Python/peephole.c
+@@ -135,6 +135,7 @@
+ will return a surrogate. In both the cases skip the
+ optimization in order to produce compatible pycs.
+ */
++#ifdef Py_USING_UNICODE
+ if (newconst != NULL &&
+ PyUnicode_Check(v) && PyUnicode_Check(newconst)) {
+ Py_UNICODE ch = PyUnicode_AS_UNICODE(newconst)[0];
+@@ -147,6 +148,7 @@
+ return 0;
+ }
+ }
++#endif
+ break;
+ case BINARY_LSHIFT:
+ newconst = PyNumber_Lshift(v, w);
+@@ -345,7 +347,7 @@
+ codestr = (unsigned char *)memcpy(codestr,
+ PyString_AS_STRING(code), codelen);
+
+- /* Verify that RETURN_VALUE terminates the codestring. This allows
++ /* Verify that RETURN_VALUE terminates the codestring. This allows
+ the various transformation patterns to look ahead several
+ instructions without additional checks to make sure they are not
+ looking beyond the end of the code string.
+@@ -443,8 +445,8 @@
+ case BUILD_LIST:
+ j = GETARG(codestr, i);
+ h = i - 3 * j;
+- if (h >= 0 &&
+- j <= lastlc &&
++ if (h >= 0 &&
++ j <= lastlc &&
+ ((opcode == BUILD_TUPLE &&
+ ISBASICBLOCK(blocks, h, 3*(j+1))) ||
+ (opcode == BUILD_LIST &&
+@@ -488,8 +490,8 @@
+ case BINARY_AND:
+ case BINARY_XOR:
+ case BINARY_OR:
+- if (lastlc >= 2 &&
+- ISBASICBLOCK(blocks, i-6, 7) &&
++ if (lastlc >= 2 &&
++ ISBASICBLOCK(blocks, i-6, 7) &&
+ fold_binops_on_constants(&codestr[i-6], consts)) {
+ i -= 2;
+ assert(codestr[i] == LOAD_CONST);
+@@ -498,13 +500,13 @@
+ break;
+
+ /* Fold unary ops on constants.
+- LOAD_CONST c1 UNARY_OP --> LOAD_CONST unary_op(c) */
++ LOAD_CONST c1 UNARY_OP --> LOAD_CONST unary_op(c) */
+ case UNARY_NEGATIVE:
+ case UNARY_CONVERT:
+ case UNARY_INVERT:
+- if (lastlc >= 1 &&
+- ISBASICBLOCK(blocks, i-3, 4) &&
+- fold_unaryops_on_constants(&codestr[i-3], consts)) {
++ if (lastlc >= 1 &&
++ ISBASICBLOCK(blocks, i-3, 4) &&
++ fold_unaryops_on_constants(&codestr[i-3], consts)) {
+ i -= 2;
+ assert(codestr[i] == LOAD_CONST);
+ cumlc = 1;
+@@ -530,8 +532,7 @@
+ tgt = GETJUMPTGT(codestr, i);
+ j = codestr[tgt];
+ if (CONDITIONAL_JUMP(j)) {
+- /* NOTE: all possible jumps here are
+- absolute! */
++ /* NOTE: all possible jumps here are absolute! */
+ if (JUMPS_ON_TRUE(j) == JUMPS_ON_TRUE(opcode)) {
+ /* The second jump will be
+ taken iff the first is. */
+@@ -542,13 +543,10 @@
+ SETARG(codestr, i, tgttgt);
+ goto reoptimize_current;
+ } else {
+- /* The second jump is not taken
+- if the first is (so jump past
+- it), and all conditional
+- jumps pop their argument when
+- they're not taken (so change
+- the first jump to pop its
+- argument when it's taken). */
++ /* The second jump is not taken if the first is (so
++ jump past it), and all conditional jumps pop their
++ argument when they're not taken (so change the
++ first jump to pop its argument when it's taken). */
+ if (JUMPS_ON_TRUE(opcode))
+ codestr[i] = POP_JUMP_IF_TRUE;
+ else
+@@ -584,8 +582,8 @@
+ if (opcode == JUMP_FORWARD) /* JMP_ABS can go backwards */
+ opcode = JUMP_ABSOLUTE;
+ if (!ABSOLUTE_JUMP(opcode))
+- tgttgt -= i + 3; /* Calc relative jump addr */
+- if (tgttgt < 0) /* No backward relative jumps */
++ tgttgt -= i + 3; /* Calc relative jump addr */
++ if (tgttgt < 0) /* No backward relative jumps */
+ continue;
+ codestr[i] = opcode;
+ SETARG(codestr, i, tgttgt);
+diff -r 70274d53c1dd Python/pystate.c
+--- a/Python/pystate.c
++++ b/Python/pystate.c
+@@ -22,6 +22,9 @@
+ #endif
+ #endif
+
++#ifdef __cplusplus
++extern "C" {
++#endif
+
+ #ifdef WITH_THREAD
+ #include "pythread.h"
+@@ -30,10 +33,6 @@
+ #define HEAD_LOCK() PyThread_acquire_lock(head_mutex, WAIT_LOCK)
+ #define HEAD_UNLOCK() PyThread_release_lock(head_mutex)
+
+-#ifdef __cplusplus
+-extern "C" {
+-#endif
+-
+ /* The single PyInterpreterState used by this process'
+ GILState implementation
+ */
+@@ -654,10 +653,10 @@
+ PyEval_SaveThread();
+ }
+
++#endif /* WITH_THREAD */
++
+ #ifdef __cplusplus
+ }
+ #endif
+
+-#endif /* WITH_THREAD */
+
+-
+diff -r 70274d53c1dd Python/pythonrun.c
+--- a/Python/pythonrun.c
++++ b/Python/pythonrun.c
+@@ -989,55 +989,67 @@
+ return PyArg_ParseTuple(err, "O(ziiz)", message, filename,
+ lineno, offset, text);
+
++ *message = NULL;
++
+ /* new style errors. `err' is an instance */
+-
+- if (! (v = PyObject_GetAttrString(err, "msg")))
+- goto finally;
+- *message = v;
+-
+- if (!(v = PyObject_GetAttrString(err, "filename")))
+- goto finally;
+- if (v == Py_None)
+- *filename = NULL;
+- else if (! (*filename = PyString_AsString(v)))
++ *message = PyObject_GetAttrString(err, "msg");
++ if (!*message)
+ goto finally;
+
+- Py_DECREF(v);
+- if (!(v = PyObject_GetAttrString(err, "lineno")))
++ v = PyObject_GetAttrString(err, "filename");
++ if (!v)
++ goto finally;
++ if (v == Py_None) {
++ Py_DECREF(v);
++ *filename = NULL;
++ }
++ else {
++ *filename = PyString_AsString(v);
++ Py_DECREF(v);
++ if (!*filename)
++ goto finally;
++ }
++
++ v = PyObject_GetAttrString(err, "lineno");
++ if (!v)
+ goto finally;
+ hold = PyInt_AsLong(v);
+ Py_DECREF(v);
+- v = NULL;
+ if (hold < 0 && PyErr_Occurred())
+ goto finally;
+ *lineno = (int)hold;
+
+- if (!(v = PyObject_GetAttrString(err, "offset")))
++ v = PyObject_GetAttrString(err, "offset");
++ if (!v)
+ goto finally;
+ if (v == Py_None) {
+ *offset = -1;
+ Py_DECREF(v);
+- v = NULL;
+ } else {
+ hold = PyInt_AsLong(v);
+ Py_DECREF(v);
+- v = NULL;
+ if (hold < 0 && PyErr_Occurred())
+ goto finally;
+ *offset = (int)hold;
+ }
+
+- if (!(v = PyObject_GetAttrString(err, "text")))
++ v = PyObject_GetAttrString(err, "text");
++ if (!v)
+ goto finally;
+- if (v == Py_None)
++ if (v == Py_None) {
++ Py_DECREF(v);
+ *text = NULL;
+- else if (! (*text = PyString_AsString(v)))
+- goto finally;
+- Py_DECREF(v);
++ }
++ else {
++ *text = PyString_AsString(v);
++ Py_DECREF(v);
++ if (!*text)
++ goto finally;
++ }
+ return 1;
+
+ finally:
+- Py_XDECREF(v);
++ Py_XDECREF(*message);
+ return 0;
+ }
+
+diff -r 70274d53c1dd Python/thread.c
+--- a/Python/thread.c
++++ b/Python/thread.c
+@@ -24,7 +24,7 @@
+ #include <stdlib.h>
+
+ #ifdef __sgi
+-#ifndef HAVE_PTHREAD_H /* XXX Need to check in configure.in */
++#ifndef HAVE_PTHREAD_H /* XXX Need to check in configure.ac */
+ #undef _POSIX_THREADS
+ #endif
+ #endif
+diff -r 70274d53c1dd README
+--- a/README
++++ b/README
+@@ -241,7 +241,7 @@
+ - NeXT
+ - Irix 4 and --with-sgi-dl
+ - Linux 1
+-- Systems defining __d6_pthread_create (configure.in)
++- Systems defining __d6_pthread_create (configure.ac)
+ - Systems defining PY_PTHREAD_D4, PY_PTHREAD_D6,
+ or PY_PTHREAD_D7 in thread_pthread.h
+ - Systems using --with-dl-dld
+@@ -680,10 +680,10 @@
+ threads to work properly. Below is a table of those options,
+ collected by Bill Janssen. We would love to automate this process
+ more, but the information below is not enough to write a patch for the
+-configure.in file, so manual intervention is required. If you patch
+-the configure.in file and are confident that the patch works, please
++configure.ac file, so manual intervention is required. If you patch
++the configure.ac file and are confident that the patch works, please
+ send in the patch. (Don't bother patching the configure script itself
+--- it is regenerated each time the configure.in file changes.)
++-- it is regenerated each time the configure.ac file changes.)
+
+ Compiler switches for threads
+ .............................
+@@ -1201,7 +1201,7 @@
+ Tools/ Some useful programs written in Python
+ pyconfig.h.in Source from which pyconfig.h is created (GNU autoheader output)
+ configure Configuration shell script (GNU autoconf output)
+-configure.in Configuration specification (input for GNU autoconf)
++configure.ac Configuration specification (input for GNU autoconf)
+ install-sh Shell script used to install files
+ setup.py Python script used to build extension modules
+
+diff -r 70274d53c1dd Tools/buildbot/build-amd64.bat
+--- a/Tools/buildbot/build-amd64.bat
++++ b/Tools/buildbot/build-amd64.bat
+@@ -1,4 +1,5 @@
+ @rem Used by the buildbot "compile" step.
++set HOST_PYTHON="%CD%\PCbuild\amd64\python_d.exe"
+ cmd /c Tools\buildbot\external-amd64.bat
+ call "%VS90COMNTOOLS%\..\..\VC\vcvarsall.bat" x86_amd64
+ cmd /c Tools\buildbot\clean-amd64.bat
+diff -r 70274d53c1dd Tools/buildbot/external-common.bat
+--- a/Tools/buildbot/external-common.bat
++++ b/Tools/buildbot/external-common.bat
+@@ -4,7 +4,7 @@
+ cd ..
+ @rem XXX: If you need to force the buildbots to start from a fresh environment, uncomment
+ @rem the following, check it in, then check it out, comment it out, then check it back in.
+-@rem if exist bzip2-1.0.5 rd /s/q bzip2-1.0.5
++@rem if exist bzip2-1.0.6 rd /s/q bzip2-1.0.6
+ @rem if exist tcltk rd /s/q tcltk
+ @rem if exist tcltk64 rd /s/q tcltk64
+ @rem if exist tcl8.4.12 rd /s/q tcl8.4.12
+@@ -15,13 +15,13 @@
+ @rem if exist tk-8.4.18.1 rd /s/q tk-8.4.18.1
+ @rem if exist db-4.4.20 rd /s/q db-4.4.20
+ @rem if exist db-4.7.25.0 rd /s/q db-4.7.25.0
+-@rem if exist openssl-0.9.8l rd /s/q openssl-0.9.8l
++@rem if exist openssl-0.9.8x rd /s/q openssl-0.9.8x
+ @rem if exist sqlite-3.6.21 rd /s/q sqlite-3.6.21
+
+ @rem bzip
+-if not exist bzip2-1.0.5 (
+- rd /s/q bzip2-1.0.3
+- svn export http://svn.python.org/projects/external/bzip2-1.0.5
++if not exist bzip2-1.0.6 (
++ rd /s/q bzip2-1.0.5
++ svn export http://svn.python.org/projects/external/bzip2-1.0.6
+ )
+
+ @rem Berkeley DB
+@@ -29,7 +29,7 @@
+ if not exist db-4.7.25.0 svn export http://svn.python.org/projects/external/db-4.7.25.0
+
+ @rem OpenSSL
+-if not exist openssl-0.9.8l svn export http://svn.python.org/projects/external/openssl-0.9.8l
++if not exist openssl-0.9.8x svn export http://svn.python.org/projects/external/openssl-0.9.8x
+
+ @rem tcl/tk
+ if not exist tcl-8.5.2.1 (
+diff -r 70274d53c1dd Tools/buildbot/test-amd64.bat
+--- a/Tools/buildbot/test-amd64.bat
++++ b/Tools/buildbot/test-amd64.bat
+@@ -1,3 +1,3 @@
+ @rem Used by the buildbot "test" step.
+ cd PCbuild
+-call rt.bat -q -d -x64 -uall -rw
++call rt.bat -d -q -x64 -uall -rwW %1 %2 %3 %4 %5 %6 %7 %8 %9
+diff -r 70274d53c1dd Tools/buildbot/test.bat
+--- a/Tools/buildbot/test.bat
++++ b/Tools/buildbot/test.bat
+@@ -1,3 +1,3 @@
+ @rem Used by the buildbot "test" step.
+ cd PCbuild
+-call rt.bat -d -q -uall -rwW
++call rt.bat -d -q -uall -rwW %1 %2 %3 %4 %5 %6 %7 %8 %9
+diff -r 70274d53c1dd Tools/scripts/byext.py
+--- a/Tools/scripts/byext.py
++++ b/Tools/scripts/byext.py
+@@ -2,6 +2,8 @@
+
+ """Show file statistics by extension."""
+
++from __future__ import print_function
++
+ import os
+ import sys
+
+diff -r 70274d53c1dd configure.ac
+--- /dev/null
++++ b/configure.ac
+@@ -0,0 +1,4356 @@
++dnl ***********************************************
++dnl * Please run autoreconf to test your changes! *
++dnl ***********************************************
++
++# Set VERSION so we only need to edit in one place (i.e., here)
++m4_define(PYTHON_VERSION, 2.7)
++
++AC_PREREQ(2.65)
++
++AC_REVISION($Revision$)
++AC_INIT(python, PYTHON_VERSION, http://bugs.python.org/)
++AC_CONFIG_SRCDIR([Include/object.h])
++AC_CONFIG_HEADER(pyconfig.h)
++
++dnl Ensure that if prefix is specified, it does not end in a slash. If
++dnl it does, we get path names containing '//' which is both ugly and
++dnl can cause trouble.
++
++dnl Last slash shouldn't be stripped if prefix=/
++if test "$prefix" != "/"; then
++ prefix=`echo "$prefix" | sed -e 's/\/$//g'`
++fi
++
++dnl This is for stuff that absolutely must end up in pyconfig.h.
++dnl Please use pyport.h instead, if possible.
++AH_TOP([
++#ifndef Py_PYCONFIG_H
++#define Py_PYCONFIG_H
++])
++AH_BOTTOM([
++/* Define the macros needed if on a UnixWare 7.x system. */
++#if defined(__USLC__) && defined(__SCO_VERSION__)
++#define STRICT_SYSV_CURSES /* Don't use ncurses extensions */
++#endif
++
++#endif /*Py_PYCONFIG_H*/
++])
++
++# We don't use PACKAGE_ variables, and they cause conflicts
++# with other autoconf-based packages that include Python.h
++grep -v 'define PACKAGE_' <confdefs.h >confdefs.h.new
++rm confdefs.h
++mv confdefs.h.new confdefs.h
++
++AC_SUBST(VERSION)
++VERSION=PYTHON_VERSION
++
++AC_SUBST(SOVERSION)
++SOVERSION=1.0
++
++# The later defininition of _XOPEN_SOURCE disables certain features
++# on Linux, so we need _GNU_SOURCE to re-enable them (makedev, tm_zone).
++AC_DEFINE(_GNU_SOURCE, 1, [Define on Linux to activate all library features])
++
++# The later defininition of _XOPEN_SOURCE and _POSIX_C_SOURCE disables
++# certain features on NetBSD, so we need _NETBSD_SOURCE to re-enable
++# them.
++AC_DEFINE(_NETBSD_SOURCE, 1, [Define on NetBSD to activate all library features])
++
++# The later defininition of _XOPEN_SOURCE and _POSIX_C_SOURCE disables
++# certain features on FreeBSD, so we need __BSD_VISIBLE to re-enable
++# them.
++AC_DEFINE(__BSD_VISIBLE, 1, [Define on FreeBSD to activate all library features])
++
++# The later defininition of _XOPEN_SOURCE and _POSIX_C_SOURCE disables
++# u_int on Irix 5.3. Defining _BSD_TYPES brings it back.
++AC_DEFINE(_BSD_TYPES, 1, [Define on Irix to enable u_int])
++
++# The later defininition of _XOPEN_SOURCE and _POSIX_C_SOURCE disables
++# certain features on Mac OS X, so we need _DARWIN_C_SOURCE to re-enable
++# them.
++AC_DEFINE(_DARWIN_C_SOURCE, 1, [Define on Darwin to activate all library features])
++
++
++define_xopen_source=yes
++
++# Arguments passed to configure.
++AC_SUBST(CONFIG_ARGS)
++CONFIG_ARGS="$ac_configure_args"
++
++AC_MSG_CHECKING([for --enable-universalsdk])
++AC_ARG_ENABLE(universalsdk,
++ AS_HELP_STRING([--enable-universalsdk@<:@=SDKDIR@:>@], [Build against Mac OS X 10.4u SDK (ppc/i386)]),
++[
++ case $enableval in
++ yes)
++ enableval=/Developer/SDKs/MacOSX10.4u.sdk
++ if test ! -d "${enableval}"
++ then
++ enableval=/
++ fi
++ ;;
++ esac
++ case $enableval in
++ no)
++ UNIVERSALSDK=
++ enable_universalsdk=
++ ;;
++ *)
++ UNIVERSALSDK=$enableval
++ if test ! -d "${UNIVERSALSDK}"
++ then
++ AC_MSG_ERROR([--enable-universalsdk specifies non-existing SDK: ${UNIVERSALSDK}])
++ fi
++ ;;
++ esac
++
++],[
++ UNIVERSALSDK=
++ enable_universalsdk=
++])
++if test -n "${UNIVERSALSDK}"
++then
++ AC_MSG_RESULT(${UNIVERSALSDK})
++else
++ AC_MSG_RESULT(no)
++fi
++AC_SUBST(UNIVERSALSDK)
++
++AC_SUBST(ARCH_RUN_32BIT)
++
++UNIVERSAL_ARCHS="32-bit"
++AC_SUBST(LIPO_32BIT_FLAGS)
++AC_MSG_CHECKING(for --with-universal-archs)
++AC_ARG_WITH(universal-archs,
++ AS_HELP_STRING([--with-universal-archs=ARCH], [select architectures for universal build ("32-bit", "64-bit", "3-way", "intel" or "all")]),
++[
++ AC_MSG_RESULT($withval)
++ UNIVERSAL_ARCHS="$withval"
++ if test "${enable_universalsdk}" ; then
++ :
++ else
++ AC_MSG_ERROR([--with-universal-archs without --enable-universalsdk. See Mac/README])
++ fi
++],
++[
++ AC_MSG_RESULT(32-bit)
++])
++
++
++
++AC_ARG_WITH(framework-name,
++ AS_HELP_STRING([--with-framework-name=FRAMEWORK],
++ [specify an alternate name of the framework built with --enable-framework]),
++[
++ if test "${enable_framework}"; then
++ :
++ else
++ AC_MSG_ERROR([--with-framework-name without --enable-framework. See Mac/README])
++ fi
++ PYTHONFRAMEWORK=${withval}
++ PYTHONFRAMEWORKDIR=${withval}.framework
++ PYTHONFRAMEWORKIDENTIFIER=org.python.`echo $withval | tr '[A-Z]' '[a-z]'`
++ ],[
++ PYTHONFRAMEWORK=Python
++ PYTHONFRAMEWORKDIR=Python.framework
++ PYTHONFRAMEWORKIDENTIFIER=org.python.python
++])
++dnl quadrigraphs "@<:@" and "@:>@" produce "[" and "]" in the output
++AC_ARG_ENABLE(framework,
++ AS_HELP_STRING([--enable-framework@<:@=INSTALLDIR@:>@], [Build (MacOSX|Darwin) framework]),
++[
++ case $enableval in
++ yes)
++ enableval=/Library/Frameworks
++ esac
++ case $enableval in
++ no)
++ PYTHONFRAMEWORK=
++ PYTHONFRAMEWORKDIR=no-framework
++ PYTHONFRAMEWORKPREFIX=
++ PYTHONFRAMEWORKINSTALLDIR=
++ FRAMEWORKINSTALLFIRST=
++ FRAMEWORKINSTALLLAST=
++ FRAMEWORKALTINSTALLFIRST=
++ FRAMEWORKALTINSTALLLAST=
++ if test "x${prefix}" = "xNONE"; then
++ FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}"
++ else
++ FRAMEWORKUNIXTOOLSPREFIX="${prefix}"
++ fi
++ enable_framework=
++ ;;
++ *)
++ PYTHONFRAMEWORKPREFIX="${enableval}"
++ PYTHONFRAMEWORKINSTALLDIR=$PYTHONFRAMEWORKPREFIX/$PYTHONFRAMEWORKDIR
++ FRAMEWORKINSTALLFIRST="frameworkinstallstructure"
++ FRAMEWORKALTINSTALLFIRST="frameworkinstallstructure bininstall maninstall"
++ FRAMEWORKINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkinstallunixtools"
++ FRAMEWORKALTINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkaltinstallunixtools"
++ FRAMEWORKINSTALLAPPSPREFIX="/Applications"
++
++ if test "x${prefix}" = "xNONE" ; then
++ FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}"
++
++ else
++ FRAMEWORKUNIXTOOLSPREFIX="${prefix}"
++ fi
++
++ case "${enableval}" in
++ /System*)
++ FRAMEWORKINSTALLAPPSPREFIX="/Applications"
++ if test "${prefix}" = "NONE" ; then
++ # See below
++ FRAMEWORKUNIXTOOLSPREFIX="/usr"
++ fi
++ ;;
++
++ /Library*)
++ FRAMEWORKINSTALLAPPSPREFIX="/Applications"
++ ;;
++
++ */Library/Frameworks)
++ MDIR="`dirname "${enableval}"`"
++ MDIR="`dirname "${MDIR}"`"
++ FRAMEWORKINSTALLAPPSPREFIX="${MDIR}/Applications"
++
++ if test "${prefix}" = "NONE"; then
++ # User hasn't specified the
++ # --prefix option, but wants to install
++ # the framework in a non-default location,
++ # ensure that the compatibility links get
++ # installed relative to that prefix as well
++ # instead of in /usr/local.
++ FRAMEWORKUNIXTOOLSPREFIX="${MDIR}"
++ fi
++ ;;
++
++ *)
++ FRAMEWORKINSTALLAPPSPREFIX="/Applications"
++ ;;
++ esac
++
++ prefix=$PYTHONFRAMEWORKINSTALLDIR/Versions/$VERSION
++
++ # Add files for Mac specific code to the list of output
++ # files:
++ AC_CONFIG_FILES(Mac/Makefile)
++ AC_CONFIG_FILES(Mac/PythonLauncher/Makefile)
++ AC_CONFIG_FILES(Mac/IDLE/Makefile)
++ AC_CONFIG_FILES(Mac/Resources/framework/Info.plist)
++ AC_CONFIG_FILES(Mac/Resources/app/Info.plist)
++ esac
++ ],[
++ PYTHONFRAMEWORK=
++ PYTHONFRAMEWORKDIR=no-framework
++ PYTHONFRAMEWORKPREFIX=
++ PYTHONFRAMEWORKINSTALLDIR=
++ FRAMEWORKINSTALLFIRST=
++ FRAMEWORKINSTALLLAST=
++ FRAMEWORKALTINSTALLFIRST=
++ FRAMEWORKALTINSTALLLAST=
++ if test "x${prefix}" = "xNONE" ; then
++ FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}"
++ else
++ FRAMEWORKUNIXTOOLSPREFIX="${prefix}"
++ fi
++ enable_framework=
++
++])
++AC_SUBST(PYTHONFRAMEWORK)
++AC_SUBST(PYTHONFRAMEWORKIDENTIFIER)
++AC_SUBST(PYTHONFRAMEWORKDIR)
++AC_SUBST(PYTHONFRAMEWORKPREFIX)
++AC_SUBST(PYTHONFRAMEWORKINSTALLDIR)
++AC_SUBST(FRAMEWORKINSTALLFIRST)
++AC_SUBST(FRAMEWORKINSTALLLAST)
++AC_SUBST(FRAMEWORKALTINSTALLFIRST)
++AC_SUBST(FRAMEWORKALTINSTALLLAST)
++AC_SUBST(FRAMEWORKUNIXTOOLSPREFIX)
++AC_SUBST(FRAMEWORKINSTALLAPPSPREFIX)
++
++##AC_ARG_WITH(dyld,
++## AS_HELP_STRING([--with-dyld],
++## [Use (OpenStep|Rhapsody) dynamic linker]))
++##
++# Set name for machine-dependent library files
++AC_SUBST(MACHDEP)
++AC_MSG_CHECKING(MACHDEP)
++if test -z "$MACHDEP"
++then
++ ac_sys_system=`uname -s`
++ if test "$ac_sys_system" = "AIX" \
++ -o "$ac_sys_system" = "UnixWare" -o "$ac_sys_system" = "OpenUNIX"; then
++ ac_sys_release=`uname -v`
++ else
++ ac_sys_release=`uname -r`
++ fi
++ ac_md_system=`echo $ac_sys_system |
++ tr -d '[/ ]' | tr '[[A-Z]]' '[[a-z]]'`
++ ac_md_release=`echo $ac_sys_release |
++ tr -d '[/ ]' | sed 's/^[[A-Z]]\.//' | sed 's/\..*//'`
++ MACHDEP="$ac_md_system$ac_md_release"
++
++ case $MACHDEP in
++ linux*) MACHDEP="linux2";;
++ cygwin*) MACHDEP="cygwin";;
++ darwin*) MACHDEP="darwin";;
++ atheos*) MACHDEP="atheos";;
++ irix646) MACHDEP="irix6";;
++ '') MACHDEP="unknown";;
++ esac
++fi
++
++# Some systems cannot stand _XOPEN_SOURCE being defined at all; they
++# disable features if it is defined, without any means to access these
++# features as extensions. For these systems, we skip the definition of
++# _XOPEN_SOURCE. Before adding a system to the list to gain access to
++# some feature, make sure there is no alternative way to access this
++# feature. Also, when using wildcards, make sure you have verified the
++# need for not defining _XOPEN_SOURCE on all systems matching the
++# wildcard, and that the wildcard does not include future systems
++# (which may remove their limitations).
++dnl quadrigraphs "@<:@" and "@:>@" produce "[" and "]" in the output
++case $ac_sys_system/$ac_sys_release in
++ # On OpenBSD, select(2) is not available if _XOPEN_SOURCE is defined,
++ # even though select is a POSIX function. Reported by J. Ribbens.
++ # Reconfirmed for OpenBSD 3.3 by Zachary Hamm, for 3.4 by Jason Ish.
++ # In addition, Stefan Krah confirms that issue #1244610 exists through
++ # OpenBSD 4.6, but is fixed in 4.7.
++ OpenBSD/2.* | OpenBSD/3.* | OpenBSD/4.@<:@0123456@:>@)
++ define_xopen_source=no
++ # OpenBSD undoes our definition of __BSD_VISIBLE if _XOPEN_SOURCE is
++ # also defined. This can be overridden by defining _BSD_SOURCE
++ # As this has a different meaning on Linux, only define it on OpenBSD
++ AC_DEFINE(_BSD_SOURCE, 1, [Define on OpenBSD to activate all library features])
++ ;;
++ OpenBSD/*)
++ # OpenBSD undoes our definition of __BSD_VISIBLE if _XOPEN_SOURCE is
++ # also defined. This can be overridden by defining _BSD_SOURCE
++ # As this has a different meaning on Linux, only define it on OpenBSD
++ AC_DEFINE(_BSD_SOURCE, 1, [Define on OpenBSD to activate all library features])
++ ;;
++ # Defining _XOPEN_SOURCE on NetBSD version prior to the introduction of
++ # _NETBSD_SOURCE disables certain features (eg. setgroups). Reported by
++ # Marc Recht
++ NetBSD/1.5 | NetBSD/1.5.* | NetBSD/1.6 | NetBSD/1.6.* | NetBSD/1.6@<:@A-S@:>@)
++ define_xopen_source=no;;
++ # From the perspective of Solaris, _XOPEN_SOURCE is not so much a
++ # request to enable features supported by the standard as a request
++ # to disable features not supported by the standard. The best way
++ # for Python to use Solaris is simply to leave _XOPEN_SOURCE out
++ # entirely and define __EXTENSIONS__ instead.
++ SunOS/*)
++ define_xopen_source=no;;
++ # On UnixWare 7, u_long is never defined with _XOPEN_SOURCE,
++ # but used in /usr/include/netinet/tcp.h. Reported by Tim Rice.
++ # Reconfirmed for 7.1.4 by Martin v. Loewis.
++ OpenUNIX/8.0.0| UnixWare/7.1.@<:@0-4@:>@)
++ define_xopen_source=no;;
++ # On OpenServer 5, u_short is never defined with _XOPEN_SOURCE,
++ # but used in struct sockaddr.sa_family. Reported by Tim Rice.
++ SCO_SV/3.2)
++ define_xopen_source=no;;
++ # On FreeBSD 4, the math functions C89 does not cover are never defined
++ # with _XOPEN_SOURCE and __BSD_VISIBLE does not re-enable them.
++ FreeBSD/4.*)
++ define_xopen_source=no;;
++ # On MacOS X 10.2, a bug in ncurses.h means that it craps out if
++ # _XOPEN_EXTENDED_SOURCE is defined. Apparently, this is fixed in 10.3, which
++ # identifies itself as Darwin/7.*
++ # On Mac OS X 10.4, defining _POSIX_C_SOURCE or _XOPEN_SOURCE
++ # disables platform specific features beyond repair.
++ # On Mac OS X 10.3, defining _POSIX_C_SOURCE or _XOPEN_SOURCE
++ # has no effect, don't bother defining them
++ Darwin/@<:@6789@:>@.*)
++ define_xopen_source=no;;
++ Darwin/1@<:@0-9@:>@.*)
++ define_xopen_source=no;;
++ # On AIX 4 and 5.1, mbstate_t is defined only when _XOPEN_SOURCE == 500 but
++ # used in wcsnrtombs() and mbsnrtowcs() even if _XOPEN_SOURCE is not defined
++ # or has another value. By not (re)defining it, the defaults come in place.
++ AIX/4)
++ define_xopen_source=no;;
++ AIX/5)
++ if test `uname -r` -eq 1; then
++ define_xopen_source=no
++ fi
++ ;;
++ # On QNX 6.3.2, defining _XOPEN_SOURCE prevents netdb.h from
++ # defining NI_NUMERICHOST.
++ QNX/6.3.2)
++ define_xopen_source=no
++ ;;
++
++esac
++
++if test $define_xopen_source = yes
++then
++ AC_DEFINE(_XOPEN_SOURCE, 600,
++ Define to the level of X/Open that your system supports)
++
++ # On Tru64 Unix 4.0F, defining _XOPEN_SOURCE also requires
++ # definition of _XOPEN_SOURCE_EXTENDED and _POSIX_C_SOURCE, or else
++ # several APIs are not declared. Since this is also needed in some
++ # cases for HP-UX, we define it globally.
++ AC_DEFINE(_XOPEN_SOURCE_EXTENDED, 1,
++ Define to activate Unix95-and-earlier features)
++
++ AC_DEFINE(_POSIX_C_SOURCE, 200112L, Define to activate features from IEEE Stds 1003.1-2001)
++
++fi
++
++#
++# SGI compilers allow the specification of the both the ABI and the
++# ISA on the command line. Depending on the values of these switches,
++# different and often incompatable code will be generated.
++#
++# The SGI_ABI variable can be used to modify the CC and LDFLAGS and
++# thus supply support for various ABI/ISA combinations. The MACHDEP
++# variable is also adjusted.
++#
++AC_SUBST(SGI_ABI)
++if test ! -z "$SGI_ABI"
++then
++ CC="cc $SGI_ABI"
++ LDFLAGS="$SGI_ABI $LDFLAGS"
++ MACHDEP=`echo "${MACHDEP}${SGI_ABI}" | sed 's/ *//g'`
++fi
++AC_MSG_RESULT($MACHDEP)
++
++# And add extra plat-mac for darwin
++AC_SUBST(EXTRAPLATDIR)
++AC_SUBST(EXTRAMACHDEPPATH)
++AC_MSG_CHECKING(EXTRAPLATDIR)
++if test -z "$EXTRAPLATDIR"
++then
++ case $MACHDEP in
++ darwin)
++ EXTRAPLATDIR="\$(PLATMACDIRS)"
++ EXTRAMACHDEPPATH="\$(PLATMACPATH)"
++ ;;
++ *)
++ EXTRAPLATDIR=""
++ EXTRAMACHDEPPATH=""
++ ;;
++ esac
++fi
++AC_MSG_RESULT($EXTRAPLATDIR)
++
++# Record the configure-time value of MACOSX_DEPLOYMENT_TARGET,
++# it may influence the way we can build extensions, so distutils
++# needs to check it
++AC_SUBST(CONFIGURE_MACOSX_DEPLOYMENT_TARGET)
++AC_SUBST(EXPORT_MACOSX_DEPLOYMENT_TARGET)
++CONFIGURE_MACOSX_DEPLOYMENT_TARGET=
++EXPORT_MACOSX_DEPLOYMENT_TARGET='#'
++
++AC_MSG_CHECKING(machine type as reported by uname -m)
++ac_sys_machine=`uname -m`
++AC_MSG_RESULT($ac_sys_machine)
++
++# checks for alternative programs
++
++# compiler flags are generated in two sets, BASECFLAGS and OPT. OPT is just
++# for debug/optimization stuff. BASECFLAGS is for flags that are required
++# just to get things to compile and link. Users are free to override OPT
++# when running configure or make. The build should not break if they do.
++# BASECFLAGS should generally not be messed with, however.
++
++# XXX shouldn't some/most/all of this code be merged with the stuff later
++# on that fiddles with OPT and BASECFLAGS?
++AC_MSG_CHECKING(for --without-gcc)
++AC_ARG_WITH(gcc,
++ AS_HELP_STRING([--without-gcc], [never use gcc]),
++[
++ case $withval in
++ no) CC=${CC:-cc}
++ without_gcc=yes;;
++ yes) CC=gcc
++ without_gcc=no;;
++ *) CC=$withval
++ without_gcc=$withval;;
++ esac], [
++ case $ac_sys_system in
++ AIX*) CC=${CC:-xlc_r}
++ without_gcc=;;
++ BeOS*)
++ case $BE_HOST_CPU in
++ ppc)
++ CC=mwcc
++ without_gcc=yes
++ BASECFLAGS="$BASECFLAGS -export pragma"
++ OPT="$OPT -O"
++ LDFLAGS="$LDFLAGS -nodup"
++ ;;
++ x86)
++ CC=gcc
++ without_gcc=no
++ OPT="$OPT -O"
++ ;;
++ *)
++ AC_MSG_ERROR([Unknown BeOS platform "$BE_HOST_CPU"])
++ ;;
++ esac
++ AR="\$(srcdir)/Modules/ar_beos"
++ RANLIB=:
++ ;;
++ *) without_gcc=no;;
++ esac])
++AC_MSG_RESULT($without_gcc)
++
++# If the user switches compilers, we can't believe the cache
++if test ! -z "$ac_cv_prog_CC" -a ! -z "$CC" -a "$CC" != "$ac_cv_prog_CC"
++then
++ AC_MSG_ERROR([cached CC is different -- throw away $cache_file
++(it is also a good idea to do 'make clean' before compiling)])
++fi
++
++# If the user set CFLAGS, use this instead of the automatically
++# determined setting
++preset_cflags="$CFLAGS"
++AC_PROG_CC
++if test ! -z "$preset_cflags"
++then
++ CFLAGS=$preset_cflags
++fi
++
++AC_SUBST(CXX)
++AC_SUBST(MAINCC)
++AC_MSG_CHECKING(for --with-cxx-main=<compiler>)
++AC_ARG_WITH(cxx_main,
++ AS_HELP_STRING([--with-cxx-main=<compiler>],
++ [compile main() and link python executable with C++ compiler]),
++[
++
++ case $withval in
++ no) with_cxx_main=no
++ MAINCC='$(CC)';;
++ yes) with_cxx_main=yes
++ MAINCC='$(CXX)';;
++ *) with_cxx_main=yes
++ MAINCC=$withval
++ if test -z "$CXX"
++ then
++ CXX=$withval
++ fi;;
++ esac], [
++ with_cxx_main=no
++ MAINCC='$(CC)'
++])
++AC_MSG_RESULT($with_cxx_main)
++
++preset_cxx="$CXX"
++if test -z "$CXX"
++then
++ case "$CC" in
++ gcc) AC_PATH_PROG(CXX, [g++], [g++], [notfound]) ;;
++ cc) AC_PATH_PROG(CXX, [c++], [c++], [notfound]) ;;
++ esac
++ if test "$CXX" = "notfound"
++ then
++ CXX=""
++ fi
++fi
++if test -z "$CXX"
++then
++ AC_CHECK_PROGS(CXX, $CCC c++ g++ gcc CC cxx cc++ cl, notfound)
++ if test "$CXX" = "notfound"
++ then
++ CXX=""
++ fi
++fi
++if test "$preset_cxx" != "$CXX"
++then
++ AC_MSG_WARN([
++
++ By default, distutils will build C++ extension modules with "$CXX".
++ If this is not intended, then set CXX on the configure command line.
++ ])
++fi
++
++
++# checks for UNIX variants that set C preprocessor variables
++AC_USE_SYSTEM_EXTENSIONS
++
++# Check for unsupported systems
++case $ac_sys_system/$ac_sys_release in
++atheos*|Linux*/1*)
++ echo This system \($ac_sys_system/$ac_sys_release\) is no longer supported.
++ echo See README for details.
++ exit 1;;
++esac
++
++AC_EXEEXT
++AC_MSG_CHECKING(for --with-suffix)
++AC_ARG_WITH(suffix,
++ AS_HELP_STRING([--with-suffix=.exe], [set executable suffix]),
++[
++ case $withval in
++ no) EXEEXT=;;
++ yes) EXEEXT=.exe;;
++ *) EXEEXT=$withval;;
++ esac])
++AC_MSG_RESULT($EXEEXT)
++
++# Test whether we're running on a non-case-sensitive system, in which
++# case we give a warning if no ext is given
++AC_SUBST(BUILDEXEEXT)
++AC_MSG_CHECKING(for case-insensitive build directory)
++if test ! -d CaseSensitiveTestDir; then
++mkdir CaseSensitiveTestDir
++fi
++
++if test -d casesensitivetestdir
++then
++ AC_MSG_RESULT(yes)
++ BUILDEXEEXT=.exe
++else
++ AC_MSG_RESULT(no)
++ BUILDEXEEXT=$EXEEXT
++fi
++rmdir CaseSensitiveTestDir
++
++case $MACHDEP in
++bsdos*)
++ case $CC in
++ gcc) CC="$CC -D_HAVE_BSDI";;
++ esac;;
++esac
++
++case $ac_sys_system in
++hp*|HP*)
++ case $CC in
++ cc|*/cc) CC="$CC -Ae";;
++ esac;;
++SunOS*)
++ # Some functions have a prototype only with that define, e.g. confstr
++ AC_DEFINE(__EXTENSIONS__, 1, [Defined on Solaris to see additional function prototypes.])
++ ;;
++esac
++
++
++AC_SUBST(LIBRARY)
++AC_MSG_CHECKING(LIBRARY)
++if test -z "$LIBRARY"
++then
++ LIBRARY='libpython$(VERSION).a'
++fi
++AC_MSG_RESULT($LIBRARY)
++
++# LDLIBRARY is the name of the library to link against (as opposed to the
++# name of the library into which to insert object files). BLDLIBRARY is also
++# the library to link against, usually. On Mac OS X frameworks, BLDLIBRARY
++# is blank as the main program is not linked directly against LDLIBRARY.
++# LDLIBRARYDIR is the path to LDLIBRARY, which is made in a subdirectory. On
++# systems without shared libraries, LDLIBRARY is the same as LIBRARY
++# (defined in the Makefiles). On Cygwin LDLIBRARY is the import library,
++# DLLLIBRARY is the shared (i.e., DLL) library.
++#
++# RUNSHARED is used to run shared python without installed libraries
++#
++# INSTSONAME is the name under which the shared library will be installed
++# on the system - some systems like a version suffix, others don't.
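++# Illustration (values chosen by the case statement further below for a
++# Linux --enable-shared build): LDLIBRARY='libpython$(VERSION).so',
++# BLDLIBRARY='-L. -lpython$(VERSION)', INSTSONAME="$LDLIBRARY".$SOVERSION,
++# and RUNSHARED puts the build directory on LD_LIBRARY_PATH.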
++AC_SUBST(LDLIBRARY)
++AC_SUBST(DLLLIBRARY)
++AC_SUBST(BLDLIBRARY)
++AC_SUBST(LDLIBRARYDIR)
++AC_SUBST(INSTSONAME)
++AC_SUBST(RUNSHARED)
++LDLIBRARY="$LIBRARY"
++BLDLIBRARY='$(LDLIBRARY)'
++INSTSONAME='$(LDLIBRARY)'
++DLLLIBRARY=''
++LDLIBRARYDIR=''
++RUNSHARED=''
++
++# LINKCC is the command that links the python executable -- default is $(CC).
++# If CXX is set, and if it is needed to link a main function that was
++# compiled with CXX, LINKCC is CXX instead. Always using CXX is undesirable:
++# python might then depend on the C++ runtime
++# This is altered for AIX in order to build the export list before
++# linking.
++AC_SUBST(LINKCC)
++AC_MSG_CHECKING(LINKCC)
++if test -z "$LINKCC"
++then
++ LINKCC='$(PURIFY) $(MAINCC)'
++ case $ac_sys_system in
++ AIX*)
++ exp_extra="\"\""
++ if test $ac_sys_release -ge 5 -o \
++ $ac_sys_release -eq 4 -a `uname -r` -ge 2 ; then
++ exp_extra="."
++ fi
++ LINKCC="\$(srcdir)/Modules/makexp_aix Modules/python.exp $exp_extra \$(LIBRARY); $LINKCC";;
++ QNX*)
++ # qcc must be used because the other compilers do not
++ # support -N.
++ LINKCC=qcc;;
++ esac
++fi
++AC_MSG_RESULT($LINKCC)
++
++# GNULD is set to "yes" if the GNU linker is used. If this goes wrong
++# make sure we default having it set to "no": this is used by
++# distutils.unixccompiler to know if it should add --enable-new-dtags
++# to linker command lines, and failing to detect GNU ld simply results
++# in the same behaviour as before.
++AC_SUBST(GNULD)
++AC_MSG_CHECKING(for GNU ld)
++ac_prog=ld
++if test "$GCC" = yes; then
++ ac_prog=`$CC -print-prog-name=ld`
++fi
++case `"$ac_prog" -V 2>&1 < /dev/null` in
++ *GNU*)
++ GNULD=yes;;
++ *)
++ GNULD=no;;
++esac
++AC_MSG_RESULT($GNULD)
++
++AC_MSG_CHECKING(for --enable-shared)
++AC_ARG_ENABLE(shared,
++ AS_HELP_STRING([--enable-shared], [disable/enable building shared python library]))
++
++if test -z "$enable_shared"
++then
++ case $ac_sys_system in
++ CYGWIN* | atheos*)
++ enable_shared="yes";;
++ *)
++ enable_shared="no";;
++ esac
++fi
++AC_MSG_RESULT($enable_shared)
++
++AC_MSG_CHECKING(for --enable-profiling)
++AC_ARG_ENABLE(profiling,
++ AS_HELP_STRING([--enable-profiling], [enable C-level code profiling]),
++[ac_save_cc="$CC"
++ CC="$CC -pg"
++ AC_RUN_IFELSE([AC_LANG_SOURCE([[int main() { return 0; }]])],
++ [ac_enable_profiling="yes"],
++ [ac_enable_profiling="no"],
++ [ac_enable_profiling="no"])
++ CC="$ac_save_cc"])
++AC_MSG_RESULT($ac_enable_profiling)
++
++case "$ac_enable_profiling" in
++ "yes")
++ BASECFLAGS="-pg $BASECFLAGS"
++ LDFLAGS="-pg $LDFLAGS"
++ ;;
++esac
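++# Example (illustrative): ./configure --enable-profiling adds -pg to both
++# BASECFLAGS and LDFLAGS (when the compiler accepts it), so the resulting
++# python binary writes gmon.out profile data for analysis with gprof.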
++
++AC_MSG_CHECKING(LDLIBRARY)
++
++# MacOSX framework builds need more magic. LDLIBRARY is the dynamic
++# library that we build, but we do not want to link against it (we
++# will find it with a -framework option). For this reason there is an
++# extra variable BLDLIBRARY against which Python and the extension
++# modules are linked. This is normally the same as
++# LDLIBRARY, but empty for MacOSX framework builds.
++if test "$enable_framework"
++then
++ LDLIBRARY='$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
++ RUNSHARED=DYLD_FRAMEWORK_PATH="`pwd`:$DYLD_FRAMEWORK_PATH"
++ BLDLIBRARY=''
++else
++ BLDLIBRARY='$(LDLIBRARY)'
++fi
++
++# Other platforms follow
++if test $enable_shared = "yes"; then
++ AC_DEFINE(Py_ENABLE_SHARED, 1, [Defined if Python is built as a shared library.])
++ case $ac_sys_system in
++ BeOS*)
++ LDLIBRARY='libpython$(VERSION).so'
++ ;;
++ CYGWIN*)
++ LDLIBRARY='libpython$(VERSION).dll.a'
++ DLLLIBRARY='libpython$(VERSION).dll'
++ ;;
++ SunOS*)
++ LDLIBRARY='libpython$(VERSION).so'
++ BLDLIBRARY='-Wl,-R,$(LIBDIR) -L. -lpython$(VERSION)'
++ RUNSHARED=LD_LIBRARY_PATH=`pwd`:${LD_LIBRARY_PATH}
++ INSTSONAME="$LDLIBRARY".$SOVERSION
++ ;;
++ Linux*|GNU*|NetBSD*|FreeBSD*|DragonFly*|OpenBSD*)
++ LDLIBRARY='libpython$(VERSION).so'
++ BLDLIBRARY='-L. -lpython$(VERSION)'
++ RUNSHARED=LD_LIBRARY_PATH=`pwd`:${LD_LIBRARY_PATH}
++ case $ac_sys_system in
++ FreeBSD*)
++ SOVERSION=`echo $SOVERSION|cut -d "." -f 1`
++ ;;
++ esac
++ INSTSONAME="$LDLIBRARY".$SOVERSION
++ ;;
++ hp*|HP*)
++ case `uname -m` in
++ ia64)
++ LDLIBRARY='libpython$(VERSION).so'
++ ;;
++ *)
++ LDLIBRARY='libpython$(VERSION).sl'
++ ;;
++ esac
++ BLDLIBRARY='-Wl,+b,$(LIBDIR) -L. -lpython$(VERSION)'
++ RUNSHARED=SHLIB_PATH=`pwd`:${SHLIB_PATH}
++ ;;
++ OSF*)
++ LDLIBRARY='libpython$(VERSION).so'
++ BLDLIBRARY='-rpath $(LIBDIR) -L. -lpython$(VERSION)'
++ RUNSHARED=LD_LIBRARY_PATH=`pwd`:${LD_LIBRARY_PATH}
++ ;;
++ atheos*)
++ LDLIBRARY='libpython$(VERSION).so'
++ BLDLIBRARY='-L. -lpython$(VERSION)'
++ RUNSHARED=DLL_PATH=`pwd`:${DLL_PATH:-/atheos/sys/libs:/atheos/autolnk/lib}
++ ;;
++ Darwin*)
++ LDLIBRARY='libpython$(VERSION).dylib'
++ BLDLIBRARY='-L. -lpython$(VERSION)'
++ RUNSHARED='DYLD_LIBRARY_PATH=`pwd`:${DYLD_LIBRARY_PATH}'
++ ;;
++ AIX*)
++ LDLIBRARY='libpython$(VERSION).so'
++ RUNSHARED=LIBPATH=`pwd`:${LIBPATH}
++ ;;
++
++ esac
++else # shared is disabled
++ case $ac_sys_system in
++ CYGWIN*)
++ BLDLIBRARY='$(LIBRARY)'
++ LDLIBRARY='libpython$(VERSION).dll.a'
++ ;;
++ esac
++fi
++
++AC_MSG_RESULT($LDLIBRARY)
++
++AC_PROG_RANLIB
++AC_SUBST(AR)
++AC_CHECK_PROGS(AR, ar aal, ar)
++
++# tweak ARFLAGS only if the user didn't set it on the command line
++AC_SUBST(ARFLAGS)
++if test -z "$ARFLAGS"
++then
++ ARFLAGS="rc"
++fi
++
++AC_SUBST(SVNVERSION)
++AC_CHECK_PROG(SVNVERSION, svnversion, found, not-found)
++if test $SVNVERSION = found
++then
++ SVNVERSION="svnversion \$(srcdir)"
++else
++ SVNVERSION="echo Unversioned directory"
++fi
++
++AC_SUBST(HGVERSION)
++AC_SUBST(HGTAG)
++AC_SUBST(HGBRANCH)
++AC_CHECK_PROG(HAS_HG, hg, found, not-found)
++if test $HAS_HG = found
++then
++ HGVERSION="hg id -i \$(srcdir)"
++ HGTAG="hg id -t \$(srcdir)"
++ HGBRANCH="hg id -b \$(srcdir)"
++else
++ HGVERSION=""
++ HGTAG=""
++ HGBRANCH=""
++fi
++
++case $MACHDEP in
++bsdos*|hp*|HP*)
++ # install -d does not work on BSDI or HP-UX
++ if test -z "$INSTALL"
++ then
++ INSTALL="${srcdir}/install-sh -c"
++ fi
++esac
++AC_PROG_INSTALL
++
++# Not every filesystem supports hard links
++AC_SUBST(LN)
++if test -z "$LN" ; then
++ case $ac_sys_system in
++ BeOS*) LN="ln -s";;
++ CYGWIN*) LN="ln -s";;
++ atheos*) LN="ln -s";;
++ *) LN=ln;;
++ esac
++fi
++
++# Check for --with-pydebug
++AC_MSG_CHECKING(for --with-pydebug)
++AC_ARG_WITH(pydebug,
++ AS_HELP_STRING([--with-pydebug], [build with Py_DEBUG defined]),
++[
++if test "$withval" != no
++then
++ AC_DEFINE(Py_DEBUG, 1,
++ [Define if you want to build an interpreter with many run-time checks.])
++ AC_MSG_RESULT(yes);
++ Py_DEBUG='true'
++else AC_MSG_RESULT(no); Py_DEBUG='false'
++fi],
++[AC_MSG_RESULT(no)])
++
++# XXX Shouldn't the code above that fiddles with BASECFLAGS and OPT be
++# merged with this chunk of code?
++
++# Optimizer/debugger flags
++# ------------------------
++# (The following bit of code is complicated enough - please keep things
++# indented properly. Just pretend you're editing Python code. ;-)
++
++# There are two parallel sets of case statements below, one that checks to
++# see if OPT was set and one that does BASECFLAGS setting based upon
++# compiler and platform. BASECFLAGS tweaks need to be made even if the
++# user set OPT.
++
++# tweak OPT based on compiler and platform, only if the user didn't set
++# it on the command line
++AC_SUBST(OPT)
++if test "${OPT-unset}" = "unset"
++then
++ case $GCC in
++ yes)
++ if test "$CC" != 'g++' ; then
++ STRICT_PROTO="-Wstrict-prototypes"
++ fi
++	    # For gcc 4.x we need to use -fwrapv, so let's check if it's supported
++ if "$CC" -v --help 2>/dev/null |grep -- -fwrapv > /dev/null; then
++ WRAP="-fwrapv"
++ fi
++
++ # Clang also needs -fwrapv
++ case $CC in
++ *clang*) WRAP="-fwrapv"
++ ;;
++ esac
++
++ case $ac_cv_prog_cc_g in
++ yes)
++ if test "$Py_DEBUG" = 'true' ; then
++ # Optimization messes up debuggers, so turn it off for
++ # debug builds.
++ OPT="-g -O0 -Wall $STRICT_PROTO"
++ else
++ OPT="-g $WRAP -O3 -Wall $STRICT_PROTO"
++ fi
++ ;;
++ *)
++ OPT="-O3 -Wall $STRICT_PROTO"
++ ;;
++ esac
++ case $ac_sys_system in
++ SCO_SV*) OPT="$OPT -m486 -DSCO5"
++ ;;
++ esac
++ ;;
++
++ *)
++ OPT="-O"
++ ;;
++ esac
++fi
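++# Example (illustrative): with gcc and --with-pydebug (see above), OPT
++# becomes "-g -O0 -Wall -Wstrict-prototypes", and -DNDEBUG is left out
++# further below, i.e. assertions stay enabled in the debug build.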
++
++AC_SUBST(BASECFLAGS)
++
++# The -arch flags for universal builds on OSX
++UNIVERSAL_ARCH_FLAGS=
++AC_SUBST(UNIVERSAL_ARCH_FLAGS)
++
++# tweak BASECFLAGS based on compiler and platform
++case $GCC in
++yes)
++ # Python violates C99 rules, by casting between incompatible
++ # pointer types. GCC may generate bad code as a result of that,
++ # so use -fno-strict-aliasing if supported.
++ AC_MSG_CHECKING(whether $CC accepts -fno-strict-aliasing)
++ ac_save_cc="$CC"
++ CC="$CC -fno-strict-aliasing"
++ AC_CACHE_VAL(ac_cv_no_strict_aliasing_ok,
++ AC_COMPILE_IFELSE(
++ [AC_LANG_PROGRAM([[]], [[]])],
++ [ac_cv_no_strict_aliasing_ok=yes],
++ [ac_cv_no_strict_aliasing_ok=no]))
++ CC="$ac_save_cc"
++ AC_MSG_RESULT($ac_cv_no_strict_aliasing_ok)
++ if test $ac_cv_no_strict_aliasing_ok = yes
++ then
++ BASECFLAGS="$BASECFLAGS -fno-strict-aliasing"
++ fi
++
++ # if using gcc on alpha, use -mieee to get (near) full IEEE 754
++ # support. Without this, treatment of subnormals doesn't follow
++ # the standard.
++ case $ac_sys_machine in
++ alpha*)
++ BASECFLAGS="$BASECFLAGS -mieee"
++ ;;
++ esac
++
++ case $ac_sys_system in
++ SCO_SV*)
++ BASECFLAGS="$BASECFLAGS -m486 -DSCO5"
++ ;;
++ # is there any other compiler on Darwin besides gcc?
++ Darwin*)
++ # -Wno-long-double, -no-cpp-precomp, and -mno-fused-madd
++ # used to be here, but non-Apple gcc doesn't accept them.
++ if test "${CC}" = gcc
++ then
++ AC_MSG_CHECKING(which compiler should be used)
++ case "${UNIVERSALSDK}" in
++ */MacOSX10.4u.sdk)
++			# Build using the 10.4 SDK; force usage of gcc-4.0 when
++			# the compiler is gcc, otherwise the user will get very
++			# confusing error messages when building on OSX 10.6
++ CC=gcc-4.0
++ CPP=cpp-4.0
++ ;;
++ esac
++ AC_MSG_RESULT($CC)
++ fi
++
++ # Calculate the right deployment target for this build.
++ #
++ cur_target=`sw_vers -productVersion | sed 's/\(10\.[[0-9]]*\).*/\1/'`
++ if test ${cur_target} '>' 10.2; then
++ cur_target=10.3
++ if test ${enable_universalsdk}; then
++ if test "${UNIVERSAL_ARCHS}" = "all"; then
++ # Ensure that the default platform for a
++ # 4-way universal build is OSX 10.5,
++ # that's the first OS release where
++ # 4-way builds make sense.
++ cur_target='10.5'
++
++ elif test "${UNIVERSAL_ARCHS}" = "3-way"; then
++ cur_target='10.5'
++
++ elif test "${UNIVERSAL_ARCHS}" = "intel"; then
++ cur_target='10.5'
++
++ elif test "${UNIVERSAL_ARCHS}" = "64-bit"; then
++ cur_target='10.5'
++ fi
++ else
++ if test `/usr/bin/arch` = "i386"; then
++ # On Intel macs default to a deployment
++ # target of 10.4, that's the first OSX
++ # release with Intel support.
++ cur_target="10.4"
++ fi
++ fi
++ fi
++ CONFIGURE_MACOSX_DEPLOYMENT_TARGET=${MACOSX_DEPLOYMENT_TARGET-${cur_target}}
++
++ # Make sure that MACOSX_DEPLOYMENT_TARGET is set in the
++ # environment with a value that is the same as what we'll use
++ # in the Makefile to ensure that we'll get the same compiler
++ # environment during configure and build time.
++ MACOSX_DEPLOYMENT_TARGET="$CONFIGURE_MACOSX_DEPLOYMENT_TARGET"
++ export MACOSX_DEPLOYMENT_TARGET
++ EXPORT_MACOSX_DEPLOYMENT_TARGET=''
++
++ if test "${enable_universalsdk}"; then
++ UNIVERSAL_ARCH_FLAGS=""
++ if test "$UNIVERSAL_ARCHS" = "32-bit" ; then
++ UNIVERSAL_ARCH_FLAGS="-arch ppc -arch i386"
++ ARCH_RUN_32BIT=""
++ LIPO_32BIT_FLAGS=""
++
++ elif test "$UNIVERSAL_ARCHS" = "64-bit" ; then
++ UNIVERSAL_ARCH_FLAGS="-arch ppc64 -arch x86_64"
++ LIPO_32BIT_FLAGS=""
++ ARCH_RUN_32BIT="true"
++
++ elif test "$UNIVERSAL_ARCHS" = "all" ; then
++ UNIVERSAL_ARCH_FLAGS="-arch i386 -arch ppc -arch ppc64 -arch x86_64"
++ LIPO_32BIT_FLAGS="-extract ppc7400 -extract i386"
++ ARCH_RUN_32BIT="/usr/bin/arch -i386 -ppc"
++
++ elif test "$UNIVERSAL_ARCHS" = "intel" ; then
++ UNIVERSAL_ARCH_FLAGS="-arch i386 -arch x86_64"
++ LIPO_32BIT_FLAGS="-extract i386"
++ ARCH_RUN_32BIT="/usr/bin/arch -i386"
++
++ elif test "$UNIVERSAL_ARCHS" = "3-way" ; then
++ UNIVERSAL_ARCH_FLAGS="-arch i386 -arch ppc -arch x86_64"
++ LIPO_32BIT_FLAGS="-extract ppc7400 -extract i386"
++ ARCH_RUN_32BIT="/usr/bin/arch -i386 -ppc"
++
++ else
++ AC_MSG_ERROR([proper usage is --with-universal-arch=32-bit|64-bit|all|intel|3-way])
++
++ fi
++
++
++ CFLAGS="${UNIVERSAL_ARCH_FLAGS} ${CFLAGS}"
++ if test "${UNIVERSALSDK}" != "/"
++ then
++ CPPFLAGS="-isysroot ${UNIVERSALSDK} ${CPPFLAGS}"
++ LDFLAGS="-isysroot ${UNIVERSALSDK} ${LDFLAGS}"
++ CFLAGS="-isysroot ${UNIVERSALSDK} ${CFLAGS}"
++ fi
++
++ fi
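++	# Example (illustrative, assuming the --enable-universalsdk and
++	# --with-universal-archs options handled earlier in this script):
++	#   ./configure --enable-universalsdk --with-universal-archs=intel
++	# selects UNIVERSAL_ARCH_FLAGS="-arch i386 -arch x86_64" above.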
++
++
++ ;;
++ OSF*)
++ BASECFLAGS="$BASECFLAGS -mieee"
++ ;;
++ esac
++ ;;
++
++*)
++ case $ac_sys_system in
++ OpenUNIX*|UnixWare*)
++ BASECFLAGS="$BASECFLAGS -K pentium,host,inline,loop_unroll,alloca "
++ ;;
++ OSF*)
++ BASECFLAGS="$BASECFLAGS -ieee -std"
++ ;;
++ SCO_SV*)
++ BASECFLAGS="$BASECFLAGS -belf -Ki486 -DSCO5"
++ ;;
++ esac
++ ;;
++esac
++
++if test "$Py_DEBUG" = 'true'; then
++ :
++else
++ OPT="-DNDEBUG $OPT"
++fi
++
++if test "$ac_arch_flags"
++then
++ BASECFLAGS="$BASECFLAGS $ac_arch_flags"
++fi
++
++# disable check for icc since it seems to pass, but generates a warning
++if test "$CC" = icc
++then
++ ac_cv_opt_olimit_ok=no
++fi
++
++AC_MSG_CHECKING(whether $CC accepts -OPT:Olimit=0)
++AC_CACHE_VAL(ac_cv_opt_olimit_ok,
++[ac_save_cc="$CC"
++CC="$CC -OPT:Olimit=0"
++AC_COMPILE_IFELSE(
++ [AC_LANG_PROGRAM([[]], [[]])],
++ [ac_cv_opt_olimit_ok=yes],
++ [ac_cv_opt_olimit_ok=no]
++ )
++CC="$ac_save_cc"])
++AC_MSG_RESULT($ac_cv_opt_olimit_ok)
++if test $ac_cv_opt_olimit_ok = yes; then
++ case $ac_sys_system in
++ # XXX is this branch needed? On MacOSX 10.2.2 the result of the
++ # olimit_ok test is "no". Is it "yes" in some other Darwin-esque
++ # environment?
++ Darwin*)
++ ;;
++ *)
++ BASECFLAGS="$BASECFLAGS -OPT:Olimit=0"
++ ;;
++ esac
++else
++ AC_MSG_CHECKING(whether $CC accepts -Olimit 1500)
++ AC_CACHE_VAL(ac_cv_olimit_ok,
++ [ac_save_cc="$CC"
++ CC="$CC -Olimit 1500"
++ AC_COMPILE_IFELSE(
++ [AC_LANG_PROGRAM([[]], [[]])],
++ [ac_cv_olimit_ok=yes],
++ [ac_cv_olimit_ok=no]
++ )
++ CC="$ac_save_cc"])
++ AC_MSG_RESULT($ac_cv_olimit_ok)
++ if test $ac_cv_olimit_ok = yes; then
++ BASECFLAGS="$BASECFLAGS -Olimit 1500"
++ fi
++fi
++
++# Check whether GCC supports PyArg_ParseTuple format
++if test "$GCC" = "yes"
++then
++ AC_MSG_CHECKING(whether gcc supports ParseTuple __format__)
++ save_CFLAGS=$CFLAGS
++ CFLAGS="$CFLAGS -Werror"
++ AC_COMPILE_IFELSE([
++ AC_LANG_PROGRAM([[void f(char*,...)__attribute((format(PyArg_ParseTuple, 1, 2)));]], [[]])
++ ],[
++ AC_DEFINE(HAVE_ATTRIBUTE_FORMAT_PARSETUPLE, 1,
++ [Define if GCC supports __attribute__((format(PyArg_ParseTuple, 2, 3)))])
++ AC_MSG_RESULT(yes)
++ ],[
++ AC_MSG_RESULT(no)
++ ])
++ CFLAGS=$save_CFLAGS
++fi
++
++# On some compilers, pthreads are available without further options
++# (e.g. MacOS X). On some of these systems, the compiler will not
++# complain if unaccepted options are passed (e.g. gcc on Mac OS X).
++# So we have to see first whether pthreads are available without
++# options before we can check whether -Kpthread improves anything.
++AC_MSG_CHECKING(whether pthreads are available without options)
++AC_CACHE_VAL(ac_cv_pthread_is_default,
++[AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <pthread.h>
++
++void* routine(void* p){return NULL;}
++
++int main(){
++ pthread_t p;
++ if(pthread_create(&p,NULL,routine,NULL)!=0)
++ return 1;
++ (void)pthread_detach(p);
++ return 0;
++}
++]])],[
++ ac_cv_pthread_is_default=yes
++ ac_cv_kthread=no
++ ac_cv_pthread=no
++],[ac_cv_pthread_is_default=no],[ac_cv_pthread_is_default=no])
++])
++AC_MSG_RESULT($ac_cv_pthread_is_default)
++
++
++if test $ac_cv_pthread_is_default = yes
++then
++ ac_cv_kpthread=no
++else
++# -Kpthread, if available, provides the right #defines
++# and linker options to make pthread_create available
++# Some compilers won't report that they do not support -Kpthread,
++# so we need to run a program to see whether it really made the
++# function available.
++AC_MSG_CHECKING(whether $CC accepts -Kpthread)
++AC_CACHE_VAL(ac_cv_kpthread,
++[ac_save_cc="$CC"
++CC="$CC -Kpthread"
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <pthread.h>
++
++void* routine(void* p){return NULL;}
++
++int main(){
++ pthread_t p;
++ if(pthread_create(&p,NULL,routine,NULL)!=0)
++ return 1;
++ (void)pthread_detach(p);
++ return 0;
++}
++]])],[ac_cv_kpthread=yes],[ac_cv_kpthread=no],[ac_cv_kpthread=no])
++CC="$ac_save_cc"])
++AC_MSG_RESULT($ac_cv_kpthread)
++fi
++
++if test $ac_cv_kpthread = no -a $ac_cv_pthread_is_default = no
++then
++# -Kthread, if available, provides the right #defines
++# and linker options to make pthread_create available
++# Some compilers won't report that they do not support -Kthread,
++# so we need to run a program to see whether it really made the
++# function available.
++AC_MSG_CHECKING(whether $CC accepts -Kthread)
++AC_CACHE_VAL(ac_cv_kthread,
++[ac_save_cc="$CC"
++CC="$CC -Kthread"
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <pthread.h>
++
++void* routine(void* p){return NULL;}
++
++int main(){
++ pthread_t p;
++ if(pthread_create(&p,NULL,routine,NULL)!=0)
++ return 1;
++ (void)pthread_detach(p);
++ return 0;
++}
++]])],[ac_cv_kthread=yes],[ac_cv_kthread=no],[ac_cv_kthread=no])
++CC="$ac_save_cc"])
++AC_MSG_RESULT($ac_cv_kthread)
++fi
++
++if test $ac_cv_kthread = no -a $ac_cv_pthread_is_default = no
++then
++# -pthread, if available, provides the right #defines
++# and linker options to make pthread_create available
++# Some compilers won't report that they do not support -pthread,
++# so we need to run a program to see whether it really made the
++# function available.
++AC_MSG_CHECKING(whether $CC accepts -pthread)
++AC_CACHE_VAL(ac_cv_thread,
++[ac_save_cc="$CC"
++CC="$CC -pthread"
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <pthread.h>
++
++void* routine(void* p){return NULL;}
++
++int main(){
++ pthread_t p;
++ if(pthread_create(&p,NULL,routine,NULL)!=0)
++ return 1;
++ (void)pthread_detach(p);
++ return 0;
++}
++]])],[ac_cv_pthread=yes],[ac_cv_pthread=no],[ac_cv_pthread=no])
++CC="$ac_save_cc"])
++AC_MSG_RESULT($ac_cv_pthread)
++fi
++
++# If we have set a CC compiler flag for thread support then
++# check if it works for CXX, too.
++ac_cv_cxx_thread=no
++if test ! -z "$CXX"
++then
++AC_MSG_CHECKING(whether $CXX also accepts flags for thread support)
++ac_save_cxx="$CXX"
++
++if test "$ac_cv_kpthread" = "yes"
++then
++ CXX="$CXX -Kpthread"
++ ac_cv_cxx_thread=yes
++elif test "$ac_cv_kthread" = "yes"
++then
++ CXX="$CXX -Kthread"
++ ac_cv_cxx_thread=yes
++elif test "$ac_cv_pthread" = "yes"
++then
++ CXX="$CXX -pthread"
++ ac_cv_cxx_thread=yes
++fi
++
++if test $ac_cv_cxx_thread = yes
++then
++ echo 'void foo();int main(){foo();}void foo(){}' > conftest.$ac_ext
++ $CXX -c conftest.$ac_ext 2>&5
++ if $CXX -o conftest$ac_exeext conftest.$ac_objext 2>&5 \
++ && test -s conftest$ac_exeext && ./conftest$ac_exeext
++ then
++ ac_cv_cxx_thread=yes
++ else
++ ac_cv_cxx_thread=no
++ fi
++ rm -fr conftest*
++fi
++AC_MSG_RESULT($ac_cv_cxx_thread)
++fi
++CXX="$ac_save_cxx"
++
++dnl # check for ANSI or K&R ("traditional") preprocessor
++dnl AC_MSG_CHECKING(for C preprocessor type)
++dnl AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++dnl #define spam(name, doc) {#name, &name, #name "() -- " doc}
++dnl int foo;
++dnl struct {char *name; int *addr; char *doc;} desc = spam(foo, "something");
++dnl ]], [[;]])],[cpp_type=ansi],[AC_DEFINE(HAVE_OLD_CPP) cpp_type=traditional])
++dnl AC_MSG_RESULT($cpp_type)
++
++# checks for header files
++AC_HEADER_STDC
++AC_CHECK_HEADERS(asm/types.h conio.h curses.h direct.h dlfcn.h errno.h \
++fcntl.h grp.h \
++ieeefp.h io.h langinfo.h libintl.h ncurses.h poll.h process.h pthread.h \
++shadow.h signal.h stdint.h stropts.h termios.h thread.h \
++unistd.h utime.h \
++sys/audioio.h sys/bsdtty.h sys/epoll.h sys/event.h sys/file.h sys/loadavg.h \
++sys/lock.h sys/mkdev.h sys/modem.h \
++sys/param.h sys/poll.h sys/select.h sys/socket.h sys/statvfs.h sys/stat.h \
++sys/termio.h sys/time.h \
++sys/times.h sys/types.h sys/un.h sys/utsname.h sys/wait.h pty.h libutil.h \
++sys/resource.h netpacket/packet.h sysexits.h bluetooth.h \
++bluetooth/bluetooth.h linux/tipc.h spawn.h util.h)
++AC_HEADER_DIRENT
++AC_HEADER_MAJOR
++
++# On Solaris, term.h requires curses.h
++AC_CHECK_HEADERS(term.h,,,[
++#ifdef HAVE_CURSES_H
++#include <curses.h>
++#endif
++])
++
++# On Linux, netlink.h requires asm/types.h
++AC_CHECK_HEADERS(linux/netlink.h,,,[
++#ifdef HAVE_ASM_TYPES_H
++#include <asm/types.h>
++#endif
++#ifdef HAVE_SYS_SOCKET_H
++#include <sys/socket.h>
++#endif
++])
++
++# checks for typedefs
++was_it_defined=no
++AC_MSG_CHECKING(for clock_t in time.h)
++AC_EGREP_HEADER(clock_t, time.h, was_it_defined=yes, [
++ AC_DEFINE(clock_t, long, [Define to 'long' if <time.h> doesn't define.])
++])
++AC_MSG_RESULT($was_it_defined)
++
++# Check whether using makedev requires defining _OSF_SOURCE
++AC_MSG_CHECKING(for makedev)
++AC_LINK_IFELSE([AC_LANG_PROGRAM([[
++#if defined(MAJOR_IN_MKDEV)
++#include <sys/mkdev.h>
++#elif defined(MAJOR_IN_SYSMACROS)
++#include <sys/sysmacros.h>
++#else
++#include <sys/types.h>
++#endif ]], [[ makedev(0, 0) ]])],
++[ac_cv_has_makedev=yes],
++[ac_cv_has_makedev=no])
++if test "$ac_cv_has_makedev" = "no"; then
++ # we didn't link, try if _OSF_SOURCE will allow us to link
++ AC_LINK_IFELSE([AC_LANG_PROGRAM([[
++#define _OSF_SOURCE 1
++#include <sys/types.h>
++ ]], [[ makedev(0, 0) ]])],
++[ac_cv_has_makedev=yes],
++[ac_cv_has_makedev=no])
++ if test "$ac_cv_has_makedev" = "yes"; then
++ AC_DEFINE(_OSF_SOURCE, 1, [Define _OSF_SOURCE to get the makedev macro.])
++ fi
++fi
++AC_MSG_RESULT($ac_cv_has_makedev)
++if test "$ac_cv_has_makedev" = "yes"; then
++ AC_DEFINE(HAVE_MAKEDEV, 1, [Define this if you have the makedev macro.])
++fi
++
++# Enabling LFS on Solaris (2.6 to 9) with gcc 2.95 triggers a bug in
++# the system headers: If _XOPEN_SOURCE and _LARGEFILE_SOURCE are
++# defined, but the compiler does not support pragma redefine_extname,
++# and _LARGEFILE64_SOURCE is not defined, the headers refer to 64-bit
++# structures (such as rlimit64) without declaring them. As a
++# work-around, disable LFS on such configurations
++
++use_lfs=yes
++AC_MSG_CHECKING(Solaris LFS bug)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++#define _LARGEFILE_SOURCE 1
++#define _FILE_OFFSET_BITS 64
++#include <sys/resource.h>
++]], [[struct rlimit foo;]])],[sol_lfs_bug=no],[sol_lfs_bug=yes])
++AC_MSG_RESULT($sol_lfs_bug)
++if test "$sol_lfs_bug" = "yes"; then
++ use_lfs=no
++fi
++
++if test "$use_lfs" = "yes"; then
++# Two defines needed to enable largefile support on various platforms
++# These may affect some typedefs
++case $ac_sys_system/$ac_sys_release in
++AIX*)
++ AC_DEFINE(_LARGE_FILES, 1,
++ [This must be defined on AIX systems to enable large file support.])
++ ;;
++esac
++AC_DEFINE(_LARGEFILE_SOURCE, 1,
++[This must be defined on some systems to enable large file support.])
++AC_DEFINE(_FILE_OFFSET_BITS, 64,
++[This must be set to 64 on some systems to enable large file support.])
++fi
++
++# Add some code to confdefs.h so that the test for off_t works on SCO
++cat >> confdefs.h <<\EOF
++#if defined(SCO_DS)
++#undef _OFF_T
++#endif
++EOF
++
++# Type availability checks
++AC_TYPE_MODE_T
++AC_TYPE_OFF_T
++AC_TYPE_PID_T
++AC_DEFINE_UNQUOTED([RETSIGTYPE],[void],[assume C89 semantics that RETSIGTYPE is always void])
++AC_TYPE_SIZE_T
++AC_TYPE_UID_T
++AC_TYPE_UINT32_T
++AC_TYPE_UINT64_T
++AC_TYPE_INT32_T
++AC_TYPE_INT64_T
++AC_CHECK_TYPE(ssize_t,
++ AC_DEFINE(HAVE_SSIZE_T, 1, [Define if your compiler provides ssize_t]),,)
++
++# Sizes of various common basic types
++# ANSI C requires sizeof(char) == 1, so no need to check it
++AC_CHECK_SIZEOF(int, 4)
++AC_CHECK_SIZEOF(long, 4)
++AC_CHECK_SIZEOF(void *, 4)
++AC_CHECK_SIZEOF(short, 2)
++AC_CHECK_SIZEOF(float, 4)
++AC_CHECK_SIZEOF(double, 8)
++AC_CHECK_SIZEOF(fpos_t, 4)
++AC_CHECK_SIZEOF(size_t, 4)
++AC_CHECK_SIZEOF(pid_t, 4)
++
++AC_MSG_CHECKING(for long long support)
++have_long_long=no
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[long long x; x = (long long)0;]])],[
++ AC_DEFINE(HAVE_LONG_LONG, 1, [Define this if you have the type long long.])
++ have_long_long=yes
++],[])
++AC_MSG_RESULT($have_long_long)
++if test "$have_long_long" = yes ; then
++AC_CHECK_SIZEOF(long long, 8)
++fi
++
++AC_MSG_CHECKING(for long double support)
++have_long_double=no
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[long double x; x = (long double)0;]])],[
++ AC_DEFINE(HAVE_LONG_DOUBLE, 1, [Define this if you have the type long double.])
++ have_long_double=yes
++],[])
++AC_MSG_RESULT($have_long_double)
++if test "$have_long_double" = yes ; then
++AC_CHECK_SIZEOF(long double, 12)
++fi
++
++AC_MSG_CHECKING(for _Bool support)
++have_c99_bool=no
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[_Bool x; x = (_Bool)0;]])],[
++ AC_DEFINE(HAVE_C99_BOOL, 1, [Define this if you have the type _Bool.])
++ have_c99_bool=yes
++],[])
++AC_MSG_RESULT($have_c99_bool)
++if test "$have_c99_bool" = yes ; then
++AC_CHECK_SIZEOF(_Bool, 1)
++fi
++
++AC_CHECK_TYPES(uintptr_t,
++ [AC_CHECK_SIZEOF(uintptr_t, 4)],
++ [], [#ifdef HAVE_STDINT_H
++ #include <stdint.h>
++ #endif
++ #ifdef HAVE_INTTYPES_H
++ #include <inttypes.h>
++ #endif])
++
++AC_CHECK_SIZEOF(off_t, [], [
++#ifdef HAVE_SYS_TYPES_H
++#include <sys/types.h>
++#endif
++])
++
++AC_MSG_CHECKING(whether to enable large file support)
++if test "$have_long_long" = yes
++then
++if test "$ac_cv_sizeof_off_t" -gt "$ac_cv_sizeof_long" -a \
++ "$ac_cv_sizeof_long_long" -ge "$ac_cv_sizeof_off_t"; then
++ AC_DEFINE(HAVE_LARGEFILE_SUPPORT, 1,
++ [Defined to enable large file support when an off_t is bigger than a long
++ and long long is available and at least as big as an off_t. You may need
++ to add some flags for configuration and compilation to enable this mode.
++ (For Solaris and Linux, the necessary defines are already defined.)])
++ AC_MSG_RESULT(yes)
++else
++ AC_MSG_RESULT(no)
++fi
++else
++ AC_MSG_RESULT(no)
++fi
++
++AC_CHECK_SIZEOF(time_t, [], [
++#ifdef HAVE_SYS_TYPES_H
++#include <sys/types.h>
++#endif
++#ifdef HAVE_TIME_H
++#include <time.h>
++#endif
++])
++
++# if pthread_t is available, define SIZEOF_PTHREAD_T
++ac_save_cc="$CC"
++if test "$ac_cv_kpthread" = "yes"
++then CC="$CC -Kpthread"
++elif test "$ac_cv_kthread" = "yes"
++then CC="$CC -Kthread"
++elif test "$ac_cv_pthread" = "yes"
++then CC="$CC -pthread"
++fi
++AC_MSG_CHECKING(for pthread_t)
++have_pthread_t=no
++AC_COMPILE_IFELSE([
++ AC_LANG_PROGRAM([[#include <pthread.h>]], [[pthread_t x; x = *(pthread_t*)0;]])
++],[have_pthread_t=yes],[])
++AC_MSG_RESULT($have_pthread_t)
++if test "$have_pthread_t" = yes ; then
++ AC_CHECK_SIZEOF(pthread_t, [], [
++#ifdef HAVE_PTHREAD_H
++#include <pthread.h>
++#endif
++ ])
++fi
++CC="$ac_save_cc"
++
++AC_MSG_CHECKING(for --enable-toolbox-glue)
++AC_ARG_ENABLE(toolbox-glue,
++ AS_HELP_STRING([--enable-toolbox-glue], [disable/enable MacOSX glue code for extensions]))
++
++if test -z "$enable_toolbox_glue"
++then
++ case $ac_sys_system/$ac_sys_release in
++ Darwin/*)
++ enable_toolbox_glue="yes";;
++ *)
++ enable_toolbox_glue="no";;
++ esac
++fi
++case "$enable_toolbox_glue" in
++yes)
++ extra_machdep_objs="Python/mactoolboxglue.o"
++ extra_undefs="-u _PyMac_Error"
++ AC_DEFINE(USE_TOOLBOX_OBJECT_GLUE, 1,
++ [Define if you want to use MacPython modules on MacOSX in unix-Python.])
++ ;;
++*)
++ extra_machdep_objs=""
++ extra_undefs=""
++ ;;
++esac
++AC_MSG_RESULT($enable_toolbox_glue)
++
++
++AC_SUBST(OTHER_LIBTOOL_OPT)
++case $ac_sys_system/$ac_sys_release in
++ Darwin/@<:@01567@:>@\..*)
++ OTHER_LIBTOOL_OPT="-prebind -seg1addr 0x10000000"
++ ;;
++ Darwin/*)
++ OTHER_LIBTOOL_OPT=""
++ ;;
++esac
++
++
++ARCH_RUN_32BIT=""
++AC_SUBST(LIBTOOL_CRUFT)
++case $ac_sys_system/$ac_sys_release in
++ Darwin/@<:@01567@:>@\..*)
++ LIBTOOL_CRUFT="-framework System -lcc_dynamic"
++ if test "${enable_universalsdk}"; then
++ :
++ else
++ LIBTOOL_CRUFT="${LIBTOOL_CRUFT} -arch_only `/usr/bin/arch`"
++ fi
++ LIBTOOL_CRUFT=$LIBTOOL_CRUFT' -install_name $(PYTHONFRAMEWORKINSTALLDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
++ LIBTOOL_CRUFT=$LIBTOOL_CRUFT' -compatibility_version $(VERSION) -current_version $(VERSION)';;
++ Darwin/*)
++ gcc_version=`gcc -dumpversion`
++ if test ${gcc_version} '<' 4.0
++ then
++ LIBTOOL_CRUFT="-lcc_dynamic"
++ else
++ LIBTOOL_CRUFT=""
++ fi
++ AC_RUN_IFELSE([AC_LANG_SOURCE([[
++ #include <unistd.h>
++ int main(int argc, char*argv[])
++ {
++ if (sizeof(long) == 4) {
++ return 0;
++ } else {
++ return 1;
++ }
++ }
++ ]])],[ac_osx_32bit=yes],[ac_osx_32bit=no],[ac_osx_32bit=yes])
++
++ if test "${ac_osx_32bit}" = "yes"; then
++ case `/usr/bin/arch` in
++ i386)
++ MACOSX_DEFAULT_ARCH="i386"
++ ;;
++ ppc)
++ MACOSX_DEFAULT_ARCH="ppc"
++ ;;
++ *)
++ AC_MSG_ERROR([Unexpected output of 'arch' on OSX])
++ ;;
++ esac
++ else
++ case `/usr/bin/arch` in
++ i386)
++ MACOSX_DEFAULT_ARCH="x86_64"
++ ;;
++ ppc)
++ MACOSX_DEFAULT_ARCH="ppc64"
++ ;;
++ *)
++ AC_MSG_ERROR([Unexpected output of 'arch' on OSX])
++ ;;
++ esac
++
++ #ARCH_RUN_32BIT="true"
++ fi
++
++ LIBTOOL_CRUFT=$LIBTOOL_CRUFT" -lSystem -lSystemStubs -arch_only ${MACOSX_DEFAULT_ARCH}"
++ LIBTOOL_CRUFT=$LIBTOOL_CRUFT' -install_name $(PYTHONFRAMEWORKINSTALLDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
++ LIBTOOL_CRUFT=$LIBTOOL_CRUFT' -compatibility_version $(VERSION) -current_version $(VERSION)';;
++esac
++
++AC_MSG_CHECKING(for --enable-framework)
++if test "$enable_framework"
++then
++ BASECFLAGS="$BASECFLAGS -fno-common -dynamic"
++ # -F. is needed to allow linking to the framework while
++ # in the build location.
++ AC_DEFINE(WITH_NEXT_FRAMEWORK, 1,
++ [Define if you want to produce an OpenStep/Rhapsody framework
++ (shared library plus accessory files).])
++ AC_MSG_RESULT(yes)
++ if test $enable_shared = "yes"
++ then
++ AC_MSG_ERROR([Specifying both --enable-shared and --enable-framework is not supported, use only --enable-framework instead. See Mac/README.])
++ fi
++else
++ AC_MSG_RESULT(no)
++fi
++
++AC_MSG_CHECKING(for dyld)
++case $ac_sys_system/$ac_sys_release in
++ Darwin/*)
++ AC_DEFINE(WITH_DYLD, 1,
++ [Define if you want to use the new-style (Openstep, Rhapsody, MacOS)
++ dynamic linker (dyld) instead of the old-style (NextStep) dynamic
++ linker (rld). Dyld is necessary to support frameworks.])
++ AC_MSG_RESULT(always on for Darwin)
++ ;;
++ *)
++ AC_MSG_RESULT(no)
++ ;;
++esac
++
++# Set info about shared libraries.
++AC_SUBST(SO)
++AC_SUBST(LDSHARED)
++AC_SUBST(LDCXXSHARED)
++AC_SUBST(BLDSHARED)
++AC_SUBST(CCSHARED)
++AC_SUBST(LINKFORSHARED)
++# SO is the extension of shared libraries (including the dot!)
++# -- usually .so, .sl on HP-UX, .dll on Cygwin
++AC_MSG_CHECKING(SO)
++if test -z "$SO"
++then
++ case $ac_sys_system in
++ hp*|HP*)
++ case `uname -m` in
++ ia64) SO=.so;;
++ *) SO=.sl;;
++ esac
++ ;;
++ CYGWIN*) SO=.dll;;
++ *) SO=.so;;
++ esac
++else
++ # this might also be a termcap variable, see #610332
++ echo
++ echo '====================================================================='
++ echo '+ +'
++ echo '+ WARNING: You have set SO in your environment. +'
++ echo '+ Do you really mean to change the extension for shared libraries? +'
++	echo '+ Continuing in 10 seconds to let you ponder. +'
++ echo '+ +'
++ echo '====================================================================='
++ sleep 10
++fi
++AC_MSG_RESULT($SO)
++
++AC_DEFINE_UNQUOTED(SHLIB_EXT, "$SO", [Define this to be extension of shared libraries (including the dot!).])
++# LDSHARED is the ld *command* used to create shared library
++# -- "cc -G" on SunOS 5.x, "ld -shared" on IRIX 5
++# (Shared libraries in this instance are shared modules to be loaded into
++# Python, as opposed to building Python itself as a shared library.)
++AC_MSG_CHECKING(LDSHARED)
++if test -z "$LDSHARED"
++then
++ case $ac_sys_system/$ac_sys_release in
++ AIX*)
++ BLDSHARED="\$(srcdir)/Modules/ld_so_aix \$(CC) -bI:\$(srcdir)/Modules/python.exp"
++ LDSHARED="\$(BINLIBDEST)/config/ld_so_aix \$(CC) -bI:\$(BINLIBDEST)/config/python.exp"
++ ;;
++ BeOS*)
++ BLDSHARED="\$(srcdir)/Modules/ld_so_beos $LDLIBRARY"
++ LDSHARED="\$(BINLIBDEST)/config/ld_so_beos \$(LIBDIR)/$LDLIBRARY"
++ ;;
++ IRIX/5*) LDSHARED="ld -shared";;
++ IRIX*/6*) LDSHARED="ld ${SGI_ABI} -shared -all";;
++ SunOS/5*)
++ if test "$GCC" = "yes" ; then
++ LDSHARED='$(CC) -shared'
++ LDCXXSHARED='$(CXX) -shared'
++ else
++ LDSHARED='$(CC) -G'
++ LDCXXSHARED='$(CXX) -G'
++ fi ;;
++ hp*|HP*)
++ if test "$GCC" = "yes" ; then
++ LDSHARED='$(CC) -shared'
++ LDCXXSHARED='$(CXX) -shared'
++ else
++ LDSHARED='ld -b'
++ fi ;;
++ OSF*) LDSHARED="ld -shared -expect_unresolved \"*\"";;
++ Darwin/1.3*)
++ LDSHARED='$(CC) -bundle'
++ LDCXXSHARED='$(CXX) -bundle'
++ if test "$enable_framework" ; then
++ # Link against the framework. All externals should be defined.
++ BLDSHARED="$LDSHARED "'$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
++ LDSHARED="$LDSHARED "'$(PYTHONFRAMEWORKPREFIX)/$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
++ LDCXXSHARED="$LDCXXSHARED "'$(PYTHONFRAMEWORKPREFIX)/$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
++ else
++ # No framework. Ignore undefined symbols, assuming they come from Python
++ LDSHARED="$LDSHARED -undefined suppress"
++ LDCXXSHARED="$LDCXXSHARED -undefined suppress"
++ fi ;;
++ Darwin/1.4*|Darwin/5.*|Darwin/6.*)
++ LDSHARED='$(CC) -bundle'
++ LDCXXSHARED='$(CXX) -bundle'
++ if test "$enable_framework" ; then
++ # Link against the framework. All externals should be defined.
++ BLDSHARED="$LDSHARED "'$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
++ LDSHARED="$LDSHARED "'$(PYTHONFRAMEWORKPREFIX)/$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
++ LDCXXSHARED="$LDCXXSHARED "'$(PYTHONFRAMEWORKPREFIX)/$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
++ else
++ # No framework, use the Python app as bundle-loader
++ BLDSHARED="$LDSHARED "'-bundle_loader $(BUILDPYTHON)'
++ LDSHARED="$LDSHARED "'-bundle_loader $(BINDIR)/python$(VERSION)$(EXE)'
++ LDCXXSHARED="$LDCXXSHARED "'-bundle_loader $(BINDIR)/python$(VERSION)$(EXE)'
++ fi ;;
++ Darwin/*)
++ # Use -undefined dynamic_lookup whenever possible (10.3 and later).
++ # This allows an extension to be used in any Python
++
++ if test ${MACOSX_DEPLOYMENT_TARGET} '>' 10.2
++ then
++ if test "${enable_universalsdk}"; then
++ LDFLAGS="${UNIVERSAL_ARCH_FLAGS} -isysroot ${UNIVERSALSDK} ${LDFLAGS}"
++ fi
++ LDSHARED='$(CC) -bundle -undefined dynamic_lookup'
++ LDCXXSHARED='$(CXX) -bundle -undefined dynamic_lookup'
++ BLDSHARED="$LDSHARED"
++ else
++ LDSHARED='$(CC) -bundle'
++ LDCXXSHARED='$(CXX) -bundle'
++ if test "$enable_framework" ; then
++ # Link against the framework. All externals should be defined.
++ BLDSHARED="$LDSHARED "'$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
++ LDSHARED="$LDSHARED "'$(PYTHONFRAMEWORKPREFIX)/$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
++ LDCXXSHARED="$LDCXXSHARED "'$(PYTHONFRAMEWORKPREFIX)/$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
++ else
++ # No framework, use the Python app as bundle-loader
++ BLDSHARED="$LDSHARED "'-bundle_loader $(BUILDPYTHON)'
++ LDSHARED="$LDSHARED "'-bundle_loader $(BINDIR)/python$(VERSION)$(EXE)'
++ LDCXXSHARED="$LDCXXSHARED "'-bundle_loader $(BINDIR)/python$(VERSION)$(EXE)'
++ fi
++ fi
++ ;;
++ Linux*|GNU*|QNX*)
++ LDSHARED='$(CC) -shared'
++ LDCXXSHARED='$(CXX) -shared';;
++ BSD/OS*/4*)
++ LDSHARED="gcc -shared"
++ LDCXXSHARED="g++ -shared";;
++ FreeBSD*)
++ if [[ "`$CC -dM -E - </dev/null | grep __ELF__`" != "" ]]
++ then
++ LDSHARED='$(CC) -shared'
++ LDCXXSHARED='$(CXX) -shared'
++ else
++ LDSHARED="ld -Bshareable"
++ fi;;
++ OpenBSD*)
++ if [[ "`$CC -dM -E - </dev/null | grep __ELF__`" != "" ]]
++ then
++ LDSHARED='$(CC) -shared $(CCSHARED)'
++ LDCXXSHARED='$(CXX) -shared $(CCSHARED)'
++ else
++ case `uname -r` in
++ [[01]].* | 2.[[0-7]] | 2.[[0-7]].*)
++ LDSHARED="ld -Bshareable ${LDFLAGS}"
++ ;;
++ *)
++ LDSHARED='$(CC) -shared $(CCSHARED)'
++ LDCXXSHARED='$(CXX) -shared $(CCSHARED)'
++ ;;
++ esac
++ fi;;
++ NetBSD*|DragonFly*)
++ LDSHARED='$(CC) -shared'
++ LDCXXSHARED='$(CXX) -shared';;
++ OpenUNIX*|UnixWare*)
++ if test "$GCC" = "yes" ; then
++ LDSHARED='$(CC) -shared'
++ LDCXXSHARED='$(CXX) -shared'
++ else
++ LDSHARED='$(CC) -G'
++ LDCXXSHARED='$(CXX) -G'
++ fi;;
++ SCO_SV*)
++ LDSHARED='$(CC) -Wl,-G,-Bexport'
++ LDCXXSHARED='$(CXX) -Wl,-G,-Bexport';;
++ CYGWIN*)
++ LDSHARED="gcc -shared -Wl,--enable-auto-image-base"
++ LDCXXSHARED="g++ -shared -Wl,--enable-auto-image-base";;
++ atheos*)
++ LDSHARED="gcc -shared"
++ LDCXXSHARED="g++ -shared";;
++ *) LDSHARED="ld";;
++ esac
++fi
++AC_MSG_RESULT($LDSHARED)
++LDCXXSHARED=${LDCXXSHARED-$LDSHARED}
++BLDSHARED=${BLDSHARED-$LDSHARED}
++# CCSHARED are the C *flags* used to create objects to go into a shared
++# library (module) -- this is only needed for a few systems
++AC_MSG_CHECKING(CCSHARED)
++if test -z "$CCSHARED"
++then
++ case $ac_sys_system/$ac_sys_release in
++ SunOS*) if test "$GCC" = yes;
++ then CCSHARED="-fPIC";
++ elif test `uname -p` = sparc;
++ then CCSHARED="-xcode=pic32";
++ else CCSHARED="-Kpic";
++ fi;;
++ hp*|HP*) if test "$GCC" = yes;
++ then CCSHARED="-fPIC";
++ else CCSHARED="+z";
++ fi;;
++ Linux*|GNU*) CCSHARED="-fPIC";;
++ BSD/OS*/4*) CCSHARED="-fpic";;
++ FreeBSD*|NetBSD*|OpenBSD*|DragonFly*) CCSHARED="-fPIC";;
++ OpenUNIX*|UnixWare*)
++ if test "$GCC" = "yes"
++ then CCSHARED="-fPIC"
++ else CCSHARED="-KPIC"
++ fi;;
++ SCO_SV*)
++ if test "$GCC" = "yes"
++ then CCSHARED="-fPIC"
++ else CCSHARED="-Kpic -belf"
++ fi;;
++ IRIX*/6*) case $CC in
++ *gcc*) CCSHARED="-shared";;
++ *) CCSHARED="";;
++ esac;;
++ atheos*) CCSHARED="-fPIC";;
++ esac
++fi
++AC_MSG_RESULT($CCSHARED)
++# LINKFORSHARED are the flags passed to the $(CC) command that links
++# the python executable -- this is only needed for a few systems
++AC_MSG_CHECKING(LINKFORSHARED)
++if test -z "$LINKFORSHARED"
++then
++ case $ac_sys_system/$ac_sys_release in
++ AIX*) LINKFORSHARED='-Wl,-bE:Modules/python.exp -lld';;
++ hp*|HP*)
++ LINKFORSHARED="-Wl,-E -Wl,+s";;
++# LINKFORSHARED="-Wl,-E -Wl,+s -Wl,+b\$(BINLIBDEST)/lib-dynload";;
++ BSD/OS/4*) LINKFORSHARED="-Xlinker -export-dynamic";;
++ Linux*|GNU*) LINKFORSHARED="-Xlinker -export-dynamic";;
++ # -u libsys_s pulls in all symbols in libsys
++ Darwin/*)
++ # -u _PyMac_Error is needed to pull in the mac toolbox glue,
++ # which is
++ # not used by the core itself but which needs to be in the core so
++ # that dynamically loaded extension modules have access to it.
++ # -prebind is no longer used, because it actually seems to give a
++		# slowdown instead of a speedup, maybe due to the large number of
++ # dynamic loads Python does.
++
++ LINKFORSHARED="$extra_undefs"
++ if test "$enable_framework"
++ then
++ LINKFORSHARED="$LINKFORSHARED "'$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
++ fi
++ LINKFORSHARED="$LINKFORSHARED";;
++ OpenUNIX*|UnixWare*) LINKFORSHARED="-Wl,-Bexport";;
++ SCO_SV*) LINKFORSHARED="-Wl,-Bexport";;
++ ReliantUNIX*) LINKFORSHARED="-W1 -Blargedynsym";;
++ FreeBSD*|NetBSD*|OpenBSD*|DragonFly*)
++ if [[ "`$CC -dM -E - </dev/null | grep __ELF__`" != "" ]]
++ then
++ LINKFORSHARED="-Wl,--export-dynamic"
++ fi;;
++ SunOS/5*) case $CC in
++ *gcc*)
++ if $CC -Xlinker --help 2>&1 | grep export-dynamic >/dev/null
++ then
++ LINKFORSHARED="-Xlinker --export-dynamic"
++ fi;;
++ esac;;
++ CYGWIN*)
++ if test $enable_shared = "no"
++ then
++ LINKFORSHARED='-Wl,--out-implib=$(LDLIBRARY)'
++ fi;;
++ QNX*)
++ # -Wl,-E causes the symbols to be added to the dynamic
++ # symbol table so that they can be found when a module
++ # is loaded. -N 2048K causes the stack size to be set
++ # to 2048 kilobytes so that the stack doesn't overflow
++ # when running test_compile.py.
++ LINKFORSHARED='-Wl,-E -N 2048K';;
++ esac
++fi
++AC_MSG_RESULT($LINKFORSHARED)
++
++
++AC_SUBST(CFLAGSFORSHARED)
++AC_MSG_CHECKING(CFLAGSFORSHARED)
++if test ! "$LIBRARY" = "$LDLIBRARY"
++then
++ case $ac_sys_system in
++ CYGWIN*)
++ # Cygwin needs CCSHARED when building extension DLLs
++ # but not when building the interpreter DLL.
++ CFLAGSFORSHARED='';;
++ *)
++ CFLAGSFORSHARED='$(CCSHARED)'
++ esac
++fi
++AC_MSG_RESULT($CFLAGSFORSHARED)
++
++# SHLIBS are libraries (except -lc and -lm) to link to the python shared
++# library (with --enable-shared).
++# For platforms on which shared libraries are not allowed to have unresolved
++# symbols, this must be set to $(LIBS) (expanded by make). We do this even
++# if it is not required, since it creates a dependency of the shared library
++# on LIBS. This, in turn, means that applications linking the shared libpython
++# don't need to link LIBS explicitly. The default should be only changed
++# on systems where this approach causes problems.
++AC_SUBST(SHLIBS)
++AC_MSG_CHECKING(SHLIBS)
++case "$ac_sys_system" in
++ *)
++ SHLIBS='$(LIBS)';;
++esac
++AC_MSG_RESULT($SHLIBS)
++
++
++# checks for libraries
++AC_CHECK_LIB(dl, dlopen) # Dynamic linking for SunOS/Solaris and SYSV
++AC_CHECK_LIB(dld, shl_load) # Dynamic linking for HP-UX
++
++# only check for sem_init if thread support is requested
++if test "$with_threads" = "yes" -o -z "$with_threads"; then
++ AC_SEARCH_LIBS(sem_init, pthread rt posix4) # 'Real Time' functions on Solaris
++ # posix4 on Solaris 2.6
++ # pthread (first!) on Linux
++fi
++
++# check if we need libintl for locale functions
++AC_CHECK_LIB(intl, textdomain,
++ AC_DEFINE(WITH_LIBINTL, 1,
++ [Define to 1 if libintl is needed for locale functions.]))
++
++# checks for system dependent C++ extensions support
++case "$ac_sys_system" in
++ AIX*) AC_MSG_CHECKING(for genuine AIX C++ extensions support)
++ AC_LINK_IFELSE([
++ AC_LANG_PROGRAM([[#include <load.h>]],
++ [[loadAndInit("", 0, "")]])
++ ],[
++ AC_DEFINE(AIX_GENUINE_CPLUSPLUS, 1,
++ [Define for AIX if your compiler is a genuine IBM xlC/xlC_r
++ and you want support for AIX C++ shared extension modules.])
++ AC_MSG_RESULT(yes)
++ ],[
++ AC_MSG_RESULT(no)
++ ]);;
++ *) ;;
++esac
++
++# Most SVR4 platforms (e.g. Solaris) need -lsocket and -lnsl.
++# BeOS' sockets are stashed in libnet.
++AC_CHECK_LIB(nsl, t_open, [LIBS="-lnsl $LIBS"]) # SVR4
++AC_CHECK_LIB(socket, socket, [LIBS="-lsocket $LIBS"], [], $LIBS) # SVR4 sockets
++
++case "$ac_sys_system" in
++BeOS*)
++AC_CHECK_LIB(net, socket, [LIBS="-lnet $LIBS"], [], $LIBS) # BeOS
++;;
++esac
++
++AC_MSG_CHECKING(for --with-libs)
++AC_ARG_WITH(libs,
++ AS_HELP_STRING([--with-libs='lib1 ...'], [link against additional libs]),
++[
++AC_MSG_RESULT($withval)
++LIBS="$withval $LIBS"
++],
++[AC_MSG_RESULT(no)])
++
++AC_PATH_TOOL([PKG_CONFIG], [pkg-config])
++
++# Check for use of the system expat library
++AC_MSG_CHECKING(for --with-system-expat)
++AC_ARG_WITH(system_expat,
++ AS_HELP_STRING([--with-system-expat], [build pyexpat module using an installed expat library]),
++ [],
++ [with_system_expat="no"])
++
++AC_MSG_RESULT($with_system_expat)
++
++# Check for use of the system libffi library
++AC_MSG_CHECKING(for --with-system-ffi)
++AC_ARG_WITH(system_ffi,
++ AS_HELP_STRING([--with-system-ffi], [build _ctypes module using an installed ffi library]),
++ [],
++ [with_system_ffi="no"])
++
++if test "$with_system_ffi" = "yes" && test -n "$PKG_CONFIG"; then
++ LIBFFI_INCLUDEDIR="`"$PKG_CONFIG" libffi --cflags-only-I 2>/dev/null | sed -e 's/^-I//;s/ *$//'`"
++else
++ LIBFFI_INCLUDEDIR=""
++fi
++AC_SUBST(LIBFFI_INCLUDEDIR)
++
++AC_MSG_RESULT($with_system_ffi)
++
++# Check for --with-dbmliborder
++AC_MSG_CHECKING(for --with-dbmliborder)
++AC_ARG_WITH(dbmliborder,
++ AS_HELP_STRING([--with-dbmliborder=db1:db2:...], [order to check db backends for dbm. Valid value is a colon separated string with the backend names `ndbm', `gdbm' and `bdb'.]),
++[
++if test x$with_dbmliborder = xyes
++then
++AC_MSG_ERROR([proper usage is --with-dbmliborder=db1:db2:...])
++else
++ for db in `echo $with_dbmliborder | sed 's/:/ /g'`; do
++ if test x$db != xndbm && test x$db != xgdbm && test x$db != xbdb
++ then
++ AC_MSG_ERROR([proper usage is --with-dbmliborder=db1:db2:...])
++ fi
++ done
++fi])
++AC_MSG_RESULT($with_dbmliborder)
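++# Example (illustrative): ./configure --with-dbmliborder=gdbm:ndbm:bdb
++# asks for the gdbm backend to be tried first; only the names ndbm, gdbm
++# and bdb pass the validation above.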
++
++# Determine if signalmodule should be used.
++AC_SUBST(USE_SIGNAL_MODULE)
++AC_SUBST(SIGNAL_OBJS)
++AC_MSG_CHECKING(for --with-signal-module)
++AC_ARG_WITH(signal-module,
++ AS_HELP_STRING([--with-signal-module], [disable/enable signal module]))
++
++if test -z "$with_signal_module"
++then with_signal_module="yes"
++fi
++AC_MSG_RESULT($with_signal_module)
++
++if test "${with_signal_module}" = "yes"; then
++ USE_SIGNAL_MODULE=""
++ SIGNAL_OBJS=""
++else
++ USE_SIGNAL_MODULE="#"
++ SIGNAL_OBJS="Parser/intrcheck.o Python/sigcheck.o"
++fi
++
++# This is used to generate Setup.config
++AC_SUBST(USE_THREAD_MODULE)
++USE_THREAD_MODULE=""
++
++AC_MSG_CHECKING(for --with-dec-threads)
++AC_SUBST(LDLAST)
++AC_ARG_WITH(dec-threads,
++ AS_HELP_STRING([--with-dec-threads], [use DEC Alpha/OSF1 thread-safe libraries]),
++[
++AC_MSG_RESULT($withval)
++LDLAST=-threads
++if test "${with_thread+set}" != set; then
++ with_thread="$withval";
++fi],
++[AC_MSG_RESULT(no)])
++
++# Templates for things AC_DEFINEd more than once.
++# For a single AC_DEFINE, no template is needed.
++AH_TEMPLATE(C_THREADS,[Define if you have the Mach cthreads package])
++AH_TEMPLATE(_REENTRANT,
++ [Define to force use of thread-safe errno, h_errno, and other functions])
++AH_TEMPLATE(WITH_THREAD,
++ [Define if you want to compile in rudimentary thread support])
++
++AC_MSG_CHECKING(for --with-threads)
++dnl quadrigraphs "@<:@" and "@:>@" produce "[" and "]" in the output
++AC_ARG_WITH(threads,
++ AS_HELP_STRING([--with(out)-threads@<:@=DIRECTORY@:>@], [disable/enable thread support]))
++
++# --with-thread is deprecated, but check for it anyway
++dnl quadrigraphs "@<:@" and "@:>@" produce "[" and "]" in the output
++AC_ARG_WITH(thread,
++ AS_HELP_STRING([--with(out)-thread@<:@=DIRECTORY@:>@], [deprecated; use --with(out)-threads]),
++ [with_threads=$with_thread])
++
++if test -z "$with_threads"
++then with_threads="yes"
++fi
++AC_MSG_RESULT($with_threads)
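++# Example (illustrative): ./configure --without-threads sets
++# USE_THREAD_MODULE to "#" below, which is used when generating Setup.config
++# to comment out the thread module.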
++
++AC_SUBST(THREADOBJ)
++if test "$with_threads" = "no"
++then
++ USE_THREAD_MODULE="#"
++elif test "$ac_cv_pthread_is_default" = yes
++then
++ AC_DEFINE(WITH_THREAD)
++    # Defining _REENTRANT on systems with POSIX threads should not hurt.
++ AC_DEFINE(_REENTRANT)
++ posix_threads=yes
++ THREADOBJ="Python/thread.o"
++elif test "$ac_cv_kpthread" = "yes"
++then
++ CC="$CC -Kpthread"
++ if test "$ac_cv_cxx_thread" = "yes"; then
++ CXX="$CXX -Kpthread"
++ fi
++ AC_DEFINE(WITH_THREAD)
++ posix_threads=yes
++ THREADOBJ="Python/thread.o"
++elif test "$ac_cv_kthread" = "yes"
++then
++ CC="$CC -Kthread"
++ if test "$ac_cv_cxx_thread" = "yes"; then
++ CXX="$CXX -Kthread"
++ fi
++ AC_DEFINE(WITH_THREAD)
++ posix_threads=yes
++ THREADOBJ="Python/thread.o"
++elif test "$ac_cv_pthread" = "yes"
++then
++ CC="$CC -pthread"
++ if test "$ac_cv_cxx_thread" = "yes"; then
++ CXX="$CXX -pthread"
++ fi
++ AC_DEFINE(WITH_THREAD)
++ posix_threads=yes
++ THREADOBJ="Python/thread.o"
++else
++ if test ! -z "$with_threads" -a -d "$with_threads"
++ then LDFLAGS="$LDFLAGS -L$with_threads"
++ fi
++ if test ! -z "$withval" -a -d "$withval"
++ then LDFLAGS="$LDFLAGS -L$withval"
++ fi
++
++ # According to the POSIX spec, a pthreads implementation must
++ # define _POSIX_THREADS in unistd.h. Some apparently don't
++ # (e.g. gnu pth with pthread emulation)
++ AC_MSG_CHECKING(for _POSIX_THREADS in unistd.h)
++ AC_EGREP_CPP(yes,
++ [
++#include <unistd.h>
++#ifdef _POSIX_THREADS
++yes
++#endif
++ ], unistd_defines_pthreads=yes, unistd_defines_pthreads=no)
++ AC_MSG_RESULT($unistd_defines_pthreads)
++
++ AC_DEFINE(_REENTRANT)
++ AC_CHECK_HEADER(cthreads.h, [AC_DEFINE(WITH_THREAD)
++ AC_DEFINE(C_THREADS)
++ AC_DEFINE(HURD_C_THREADS, 1,
++ [Define if you are using Mach cthreads directly under /include])
++ LIBS="$LIBS -lthreads"
++ THREADOBJ="Python/thread.o"],[
++ AC_CHECK_HEADER(mach/cthreads.h, [AC_DEFINE(WITH_THREAD)
++ AC_DEFINE(C_THREADS)
++ AC_DEFINE(MACH_C_THREADS, 1,
++ [Define if you are using Mach cthreads under mach /])
++ THREADOBJ="Python/thread.o"],[
++ AC_MSG_CHECKING(for --with-pth)
++ AC_ARG_WITH([pth],
++ AS_HELP_STRING([--with-pth], [use GNU pth threading libraries]),
++ [AC_MSG_RESULT($withval)
++ AC_DEFINE([WITH_THREAD])
++ AC_DEFINE([HAVE_PTH], 1,
++ [Define if you have GNU PTH threads.])
++ LIBS="-lpth $LIBS"
++ THREADOBJ="Python/thread.o"],
++ [AC_MSG_RESULT(no)
++
++ # Just looking for pthread_create in libpthread is not enough:
++ # on HP/UX, pthread.h renames pthread_create to a different symbol name.
++ # So we really have to include pthread.h, and then link.
++ _libs=$LIBS
++ LIBS="$LIBS -lpthread"
++ AC_MSG_CHECKING([for pthread_create in -lpthread])
++ AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <pthread.h>
++
++void * start_routine (void *arg) { exit (0); }]], [[
++pthread_create (NULL, NULL, start_routine, NULL)]])],[
++ AC_MSG_RESULT(yes)
++ AC_DEFINE(WITH_THREAD)
++ posix_threads=yes
++ THREADOBJ="Python/thread.o"],[
++ LIBS=$_libs
++ AC_CHECK_FUNC(pthread_detach, [AC_DEFINE(WITH_THREAD)
++ posix_threads=yes
++ THREADOBJ="Python/thread.o"],[
++ AC_CHECK_HEADER(atheos/threads.h, [AC_DEFINE(WITH_THREAD)
++ AC_DEFINE(ATHEOS_THREADS, 1,
++ [Define this if you have AtheOS threads.])
++ THREADOBJ="Python/thread.o"],[
++ AC_CHECK_HEADER(kernel/OS.h, [AC_DEFINE(WITH_THREAD)
++ AC_DEFINE(BEOS_THREADS, 1,
++ [Define this if you have BeOS threads.])
++ THREADOBJ="Python/thread.o"],[
++ AC_CHECK_LIB(pthreads, pthread_create, [AC_DEFINE(WITH_THREAD)
++ posix_threads=yes
++ LIBS="$LIBS -lpthreads"
++ THREADOBJ="Python/thread.o"], [
++ AC_CHECK_LIB(c_r, pthread_create, [AC_DEFINE(WITH_THREAD)
++ posix_threads=yes
++ LIBS="$LIBS -lc_r"
++ THREADOBJ="Python/thread.o"], [
++ AC_CHECK_LIB(pthread, __pthread_create_system, [AC_DEFINE(WITH_THREAD)
++ posix_threads=yes
++ LIBS="$LIBS -lpthread"
++ THREADOBJ="Python/thread.o"], [
++ AC_CHECK_LIB(cma, pthread_create, [AC_DEFINE(WITH_THREAD)
++ posix_threads=yes
++ LIBS="$LIBS -lcma"
++ THREADOBJ="Python/thread.o"],[
++ USE_THREAD_MODULE="#"])
++ ])])])])])])])])])])
++
++ AC_CHECK_LIB(mpc, usconfig, [AC_DEFINE(WITH_THREAD)
++ LIBS="$LIBS -lmpc"
++ THREADOBJ="Python/thread.o"
++ USE_THREAD_MODULE=""])
++
++ if test "$posix_threads" != "yes"; then
++ AC_CHECK_LIB(thread, thr_create, [AC_DEFINE(WITH_THREAD)
++ LIBS="$LIBS -lthread"
++ THREADOBJ="Python/thread.o"
++ USE_THREAD_MODULE=""])
++ fi
++
++ if test "$USE_THREAD_MODULE" != "#"
++ then
++ # If the above checks didn't disable threads, (at least) OSF1
++ # needs this '-threads' argument during linking.
++ case $ac_sys_system in
++ OSF1) LDLAST=-threads;;
++ esac
++ fi
++fi
++
++if test "$posix_threads" = "yes"; then
++ if test "$unistd_defines_pthreads" = "no"; then
++ AC_DEFINE(_POSIX_THREADS, 1,
++ [Define if you have POSIX threads,
++ and your system does not define that.])
++ fi
++
++    # Bug 662787: Using semaphores causes inexplicable hangs on Solaris 8.
++ case $ac_sys_system/$ac_sys_release in
++ SunOS/5.6) AC_DEFINE(HAVE_PTHREAD_DESTRUCTOR, 1,
++ [Defined for Solaris 2.6 bug in pthread header.])
++ ;;
++ SunOS/5.8) AC_DEFINE(HAVE_BROKEN_POSIX_SEMAPHORES, 1,
++ [Define if the Posix semaphores do not work on your system])
++ ;;
++ AIX/*) AC_DEFINE(HAVE_BROKEN_POSIX_SEMAPHORES, 1,
++ [Define if the Posix semaphores do not work on your system])
++ ;;
++ esac
++
++ AC_MSG_CHECKING(if PTHREAD_SCOPE_SYSTEM is supported)
++ AC_CACHE_VAL(ac_cv_pthread_system_supported,
++ [AC_RUN_IFELSE([AC_LANG_SOURCE([[#include <pthread.h>
++ void *foo(void *parm) {
++ return NULL;
++ }
++ main() {
++ pthread_attr_t attr;
++ pthread_t id;
++ if (pthread_attr_init(&attr)) exit(-1);
++ if (pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM)) exit(-1);
++ if (pthread_create(&id, &attr, foo, NULL)) exit(-1);
++ exit(0);
++ }]])],
++ [ac_cv_pthread_system_supported=yes],
++ [ac_cv_pthread_system_supported=no],
++ [ac_cv_pthread_system_supported=no])
++ ])
++ AC_MSG_RESULT($ac_cv_pthread_system_supported)
++ if test "$ac_cv_pthread_system_supported" = "yes"; then
++ AC_DEFINE(PTHREAD_SYSTEM_SCHED_SUPPORTED, 1, [Defined if PTHREAD_SCOPE_SYSTEM supported.])
++ fi
++ AC_CHECK_FUNCS(pthread_sigmask,
++ [case $ac_sys_system in
++ CYGWIN*)
++ AC_DEFINE(HAVE_BROKEN_PTHREAD_SIGMASK, 1,
++ [Define if pthread_sigmask() does not work on your system.])
++ ;;
++ esac])
++fi
++
++
++# Check for enable-ipv6
++AH_TEMPLATE(ENABLE_IPV6, [Define if --enable-ipv6 is specified])
++AC_MSG_CHECKING([if --enable-ipv6 is specified])
++AC_ARG_ENABLE(ipv6,
++[ --enable-ipv6 Enable ipv6 (with ipv4) support
++ --disable-ipv6 Disable ipv6 support],
++[ case "$enableval" in
++ no)
++ AC_MSG_RESULT(no)
++ ipv6=no
++ ;;
++ *) AC_MSG_RESULT(yes)
++ AC_DEFINE(ENABLE_IPV6)
++ ipv6=yes
++ ;;
++ esac ],
++
++[
++dnl the check does not work on cross compilation case...
++ AC_RUN_IFELSE([AC_LANG_SOURCE([[ /* AF_INET6 available check */
++#include <sys/types.h>
++#include <sys/socket.h>
++main()
++{
++ if (socket(AF_INET6, SOCK_STREAM, 0) < 0)
++ exit(1);
++ else
++ exit(0);
++}
++]])],[
++ AC_MSG_RESULT(yes)
++ ipv6=yes
++],[
++ AC_MSG_RESULT(no)
++ ipv6=no
++],[
++ AC_MSG_RESULT(no)
++ ipv6=no
++])
++
++if test "$ipv6" = "yes"; then
++ AC_MSG_CHECKING(if RFC2553 API is available)
++ AC_COMPILE_IFELSE([
++ AC_LANG_PROGRAM([[#include <sys/types.h>
++#include <netinet/in.h>]],
++ [[struct sockaddr_in6 x;
++ x.sin6_scope_id;]])
++ ],[
++ AC_MSG_RESULT(yes)
++ ipv6=yes
++ ],[
++ AC_MSG_RESULT(no, IPv6 disabled)
++ ipv6=no
++ ])
++fi
++
++if test "$ipv6" = "yes"; then
++ AC_DEFINE(ENABLE_IPV6)
++fi
++])
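++# Example (illustrative): --disable-ipv6 skips the AF_INET6 run test above
++# and builds without IPv6; with neither flag given, the test program decides,
++# and cross-compilation falls back to ipv6=no.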
++
++ipv6type=unknown
++ipv6lib=none
++ipv6trylibc=no
++
++if test "$ipv6" = "yes"; then
++ AC_MSG_CHECKING([ipv6 stack type])
++ for i in inria kame linux-glibc linux-inet6 solaris toshiba v6d zeta;
++ do
++ case $i in
++ inria)
++ dnl http://www.kame.net/
++ AC_EGREP_CPP(yes, [
++#include <netinet/in.h>
++#ifdef IPV6_INRIA_VERSION
++yes
++#endif],
++ [ipv6type=$i])
++ ;;
++ kame)
++ dnl http://www.kame.net/
++ AC_EGREP_CPP(yes, [
++#include <netinet/in.h>
++#ifdef __KAME__
++yes
++#endif],
++ [ipv6type=$i;
++ ipv6lib=inet6
++ ipv6libdir=/usr/local/v6/lib
++ ipv6trylibc=yes])
++ ;;
++ linux-glibc)
++ dnl http://www.v6.linux.or.jp/
++ AC_EGREP_CPP(yes, [
++#include <features.h>
++#if defined(__GLIBC__) && ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 1) || (__GLIBC__ > 2))
++yes
++#endif],
++ [ipv6type=$i;
++ ipv6trylibc=yes])
++ ;;
++ linux-inet6)
++ dnl http://www.v6.linux.or.jp/
++ if test -d /usr/inet6; then
++ ipv6type=$i
++ ipv6lib=inet6
++ ipv6libdir=/usr/inet6/lib
++ BASECFLAGS="-I/usr/inet6/include $BASECFLAGS"
++ fi
++ ;;
++ solaris)
++ if test -f /etc/netconfig; then
++ if $GREP -q tcp6 /etc/netconfig; then
++ ipv6type=$i
++ ipv6trylibc=yes
++ fi
++ fi
++ ;;
++ toshiba)
++ AC_EGREP_CPP(yes, [
++#include <sys/param.h>
++#ifdef _TOSHIBA_INET6
++yes
++#endif],
++ [ipv6type=$i;
++ ipv6lib=inet6;
++ ipv6libdir=/usr/local/v6/lib])
++ ;;
++ v6d)
++ AC_EGREP_CPP(yes, [
++#include </usr/local/v6/include/sys/v6config.h>
++#ifdef __V6D__
++yes
++#endif],
++ [ipv6type=$i;
++ ipv6lib=v6;
++ ipv6libdir=/usr/local/v6/lib;
++ BASECFLAGS="-I/usr/local/v6/include $BASECFLAGS"])
++ ;;
++ zeta)
++ AC_EGREP_CPP(yes, [
++#include <sys/param.h>
++#ifdef _ZETA_MINAMI_INET6
++yes
++#endif],
++ [ipv6type=$i;
++ ipv6lib=inet6;
++ ipv6libdir=/usr/local/v6/lib])
++ ;;
++ esac
++ if test "$ipv6type" != "unknown"; then
++ break
++ fi
++ done
++ AC_MSG_RESULT($ipv6type)
++fi
++
++if test "$ipv6" = "yes" -a "$ipv6lib" != "none"; then
++ if test -d $ipv6libdir -a -f $ipv6libdir/lib$ipv6lib.a; then
++ LIBS="-L$ipv6libdir -l$ipv6lib $LIBS"
++ echo "using lib$ipv6lib"
++ else
++ if test $ipv6trylibc = "yes"; then
++ echo "using libc"
++ else
++			echo "Fatal: no $ipv6lib library found. Cannot continue."
++			echo "You need to fetch lib$ipv6lib.a from the appropriate"
++			echo 'ipv6 kit and compile it beforehand.'
++ exit 1
++ fi
++ fi
++fi
++
++AC_MSG_CHECKING(for OSX 10.5 SDK or later)
++AC_COMPILE_IFELSE([
++ AC_LANG_PROGRAM([[#include <Carbon/Carbon.h>]], [[FSIORefNum fRef = 0]])
++],[
++ AC_DEFINE(HAVE_OSX105_SDK, 1, [Define if compiling using MacOS X 10.5 SDK or later.])
++ AC_MSG_RESULT(yes)
++],[
++ AC_MSG_RESULT(no)
++])
++
++# Check for --with-doc-strings
++AC_MSG_CHECKING(for --with-doc-strings)
++AC_ARG_WITH(doc-strings,
++ AS_HELP_STRING([--with(out)-doc-strings], [disable/enable documentation strings]))
++
++if test -z "$with_doc_strings"
++then with_doc_strings="yes"
++fi
++if test "$with_doc_strings" != "no"
++then
++ AC_DEFINE(WITH_DOC_STRINGS, 1,
++ [Define if you want documentation strings in extension modules])
++fi
++AC_MSG_RESULT($with_doc_strings)
++
++# Check for --with-tsc (Pentium timestamp counter profiling)
++AC_MSG_CHECKING(for --with-tsc)
++AC_ARG_WITH(tsc,
++ AS_HELP_STRING([--with(out)-tsc],[enable/disable timestamp counter profile]),[
++if test "$withval" != no
++then
++ AC_DEFINE(WITH_TSC, 1,
++ [Define to profile with the Pentium timestamp counter])
++ AC_MSG_RESULT(yes)
++else AC_MSG_RESULT(no)
++fi],
++[AC_MSG_RESULT(no)])
++
++# Check for Python-specific malloc support
++AC_MSG_CHECKING(for --with-pymalloc)
++AC_ARG_WITH(pymalloc,
++ AS_HELP_STRING([--with(out)-pymalloc], [disable/enable specialized mallocs]))
++
++if test -z "$with_pymalloc"
++then with_pymalloc="yes"
++fi
++if test "$with_pymalloc" != "no"
++then
++ AC_DEFINE(WITH_PYMALLOC, 1,
++ [Define if you want to compile in Python-specific mallocs])
++fi
++AC_MSG_RESULT($with_pymalloc)
++
++# Check for Valgrind support
++AC_MSG_CHECKING([for --with-valgrind])
++AC_ARG_WITH([valgrind],
++ AS_HELP_STRING([--with-valgrind], [Enable Valgrind support]),,
++ with_valgrind=no)
++AC_MSG_RESULT([$with_valgrind])
++if test "$with_valgrind" != no; then
++ AC_CHECK_HEADER([valgrind/valgrind.h],
++ [AC_DEFINE([WITH_VALGRIND], 1, [Define if you want pymalloc to be disabled when running under valgrind])],
++ [AC_MSG_ERROR([Valgrind support requested but headers not available])]
++ )
++fi
++
++# Check for --with-wctype-functions
++AC_MSG_CHECKING(for --with-wctype-functions)
++AC_ARG_WITH(wctype-functions,
++ AS_HELP_STRING([--with-wctype-functions], [use wctype.h functions]),
++[
++if test "$withval" != no
++then
++ AC_DEFINE(WANT_WCTYPE_FUNCTIONS, 1,
++ [Define if you want wctype.h functions to be used instead of the
++   ones supplied by Python itself (see Include/unicodectype.h).])
++ AC_MSG_RESULT(yes)
++else AC_MSG_RESULT(no)
++fi],
++[AC_MSG_RESULT(no)])
++
++# -I${DLINCLDIR} is added to the compile rule for importdl.o
++AC_SUBST(DLINCLDIR)
++DLINCLDIR=.
++
++# the dlopen() function means we might want to use dynload_shlib.o. some
++# platforms, such as AIX, have dlopen(), but don't want to use it.
++AC_CHECK_FUNCS(dlopen)
++
++# DYNLOADFILE specifies which dynload_*.o file we will use for dynamic
++# loading of modules.
++AC_SUBST(DYNLOADFILE)
++AC_MSG_CHECKING(DYNLOADFILE)
++if test -z "$DYNLOADFILE"
++then
++ case $ac_sys_system/$ac_sys_release in
++ AIX*) # Use dynload_shlib.c and dlopen() if we have it; otherwise dynload_aix.c
++ if test "$ac_cv_func_dlopen" = yes
++ then DYNLOADFILE="dynload_shlib.o"
++ else DYNLOADFILE="dynload_aix.o"
++ fi
++ ;;
++ BeOS*) DYNLOADFILE="dynload_beos.o";;
++ hp*|HP*) DYNLOADFILE="dynload_hpux.o";;
++ # Use dynload_next.c only on 10.2 and below, which don't have native dlopen()
++ Darwin/@<:@0156@:>@\..*) DYNLOADFILE="dynload_next.o";;
++ atheos*) DYNLOADFILE="dynload_atheos.o";;
++ *)
++ # use dynload_shlib.c and dlopen() if we have it; otherwise stub
++ # out any dynamic loading
++ if test "$ac_cv_func_dlopen" = yes
++ then DYNLOADFILE="dynload_shlib.o"
++ else DYNLOADFILE="dynload_stub.o"
++ fi
++ ;;
++ esac
++fi
++AC_MSG_RESULT($DYNLOADFILE)
++if test "$DYNLOADFILE" != "dynload_stub.o"
++then
++ AC_DEFINE(HAVE_DYNAMIC_LOADING, 1,
++ [Defined when any dynamic module loading is enabled.])
++fi
++
++# MACHDEP_OBJS can be set to platform-specific object files needed by Python
++
++AC_SUBST(MACHDEP_OBJS)
++AC_MSG_CHECKING(MACHDEP_OBJS)
++if test -z "$MACHDEP_OBJS"
++then
++ MACHDEP_OBJS=$extra_machdep_objs
++else
++ MACHDEP_OBJS="$MACHDEP_OBJS $extra_machdep_objs"
++fi
++AC_MSG_RESULT($MACHDEP_OBJS)
++
++# checks for library functions
++AC_CHECK_FUNCS(alarm setitimer getitimer bind_textdomain_codeset chown \
++ clock confstr ctermid execv fchmod fchown fork fpathconf ftime ftruncate \
++ gai_strerror getgroups getlogin getloadavg getpeername getpgid getpid \
++ getpriority getresuid getresgid getpwent getspnam getspent getsid getwd \
++ initgroups kill killpg lchmod lchown lstat mkfifo mknod mktime \
++ mremap nice pathconf pause plock poll pthread_init \
++ putenv readlink realpath \
++ select sem_open sem_timedwait sem_getvalue sem_unlink setegid seteuid \
++ setgid \
++ setlocale setregid setreuid setresuid setresgid \
++ setsid setpgid setpgrp setuid setvbuf snprintf \
++ sigaction siginterrupt sigrelse strftime \
++ sysconf tcgetpgrp tcsetpgrp tempnam timegm times tmpfile tmpnam tmpnam_r \
++ truncate uname unsetenv utimes waitpid wait3 wait4 wcscoll _getpty)
++
++# For some functions, having a definition is not sufficient, since
++# we want to take their address.
++AC_MSG_CHECKING(for chroot)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[void *x=chroot]])],
++ [AC_DEFINE(HAVE_CHROOT, 1, Define if you have the 'chroot' function.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)
++])
++AC_MSG_CHECKING(for link)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[void *x=link]])],
++ [AC_DEFINE(HAVE_LINK, 1, Define if you have the 'link' function.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)
++])
++AC_MSG_CHECKING(for symlink)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[void *x=symlink]])],
++ [AC_DEFINE(HAVE_SYMLINK, 1, Define if you have the 'symlink' function.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)
++])
++AC_MSG_CHECKING(for fchdir)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[void *x=fchdir]])],
++ [AC_DEFINE(HAVE_FCHDIR, 1, Define if you have the 'fchdir' function.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)
++])
++AC_MSG_CHECKING(for fsync)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[void *x=fsync]])],
++ [AC_DEFINE(HAVE_FSYNC, 1, Define if you have the 'fsync' function.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)
++])
++AC_MSG_CHECKING(for fdatasync)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[void *x=fdatasync]])],
++ [AC_DEFINE(HAVE_FDATASYNC, 1, Define if you have the 'fdatasync' function.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)
++])
++AC_MSG_CHECKING(for epoll)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/epoll.h>]], [[void *x=epoll_create]])],
++ [AC_DEFINE(HAVE_EPOLL, 1, Define if you have the 'epoll' functions.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)
++])
++AC_MSG_CHECKING(for kqueue)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++#include <sys/types.h>
++#include <sys/event.h>
++ ]], [[int x=kqueue()]])],
++ [AC_DEFINE(HAVE_KQUEUE, 1, Define if you have the 'kqueue' functions.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)
++])
++# On some systems (e.g. FreeBSD 5), we would find a definition of the
++# functions ctermid_r, setgroups in the library, but no prototype
++# (e.g. because we use _XOPEN_SOURCE). See whether we can take their
++# address to avoid compiler warnings and potential miscompilations
++# because of the missing prototypes.
++
++AC_MSG_CHECKING(for ctermid_r)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++#include <stdio.h>
++]], [[void* p = ctermid_r]])],
++ [AC_DEFINE(HAVE_CTERMID_R, 1, Define if you have the 'ctermid_r' function.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)
++])
++
++AC_CACHE_CHECK([for flock declaration], [ac_cv_flock_decl],
++ [AC_COMPILE_IFELSE(
++ [AC_LANG_PROGRAM(
++ [#include <sys/file.h>],
++ [void* p = flock]
++ )],
++ [ac_cv_flock_decl=yes],
++ [ac_cv_flock_decl=no]
++ )
++])
++if test "x${ac_cv_flock_decl}" = xyes; then
++ AC_CHECK_FUNCS(flock,,
++ AC_CHECK_LIB(bsd,flock,
++ [AC_DEFINE(HAVE_FLOCK)
++ AC_DEFINE(FLOCK_NEEDS_LIBBSD, 1, Define if flock needs to be linked with bsd library.)
++ ])
++ )
++fi
++
++AC_MSG_CHECKING(for getpagesize)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++#include <unistd.h>
++]], [[void* p = getpagesize]])],
++ [AC_DEFINE(HAVE_GETPAGESIZE, 1, Define if you have the 'getpagesize' function.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)
++])
++
++AC_MSG_CHECKING(for broken unsetenv)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++#include <stdlib.h>
++]], [[int res = unsetenv("DUMMY")]])],
++ [AC_MSG_RESULT(no)],
++ [AC_DEFINE(HAVE_BROKEN_UNSETENV, 1, Define if `unsetenv` does not return an int.)
++ AC_MSG_RESULT(yes)
++])
++
++dnl check for true
++AC_CHECK_PROGS(TRUE, true, /bin/true)
++
++dnl On some systems (e.g. Solaris 9), hstrerror and inet_aton are in -lresolv
++dnl On others, they are in the C library, so we need to take no action
++AC_CHECK_LIB(c, inet_aton, [$ac_cv_prog_TRUE],
++ AC_CHECK_LIB(resolv, inet_aton)
++)
++
++# On Tru64, chflags seems to be present, but calling it will
++# exit Python
++AC_CACHE_CHECK([for chflags], [ac_cv_have_chflags], [dnl
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <sys/stat.h>
++#include <unistd.h>
++int main(int argc, char*argv[])
++{
++ if(chflags(argv[0], 0) != 0)
++ return 1;
++ return 0;
++}
++]])],
++[ac_cv_have_chflags=yes],
++[ac_cv_have_chflags=no],
++[ac_cv_have_chflags=cross])
++])
++if test "$ac_cv_have_chflags" = cross ; then
++ AC_CHECK_FUNC([chflags], [ac_cv_have_chflags="yes"], [ac_cv_have_chflags="no"])
++fi
++if test "$ac_cv_have_chflags" = yes ; then
++ AC_DEFINE(HAVE_CHFLAGS, 1, [Define to 1 if you have the 'chflags' function.])
++fi
++
++AC_CACHE_CHECK([for lchflags], [ac_cv_have_lchflags], [dnl
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <sys/stat.h>
++#include <unistd.h>
++int main(int argc, char*argv[])
++{
++ if(lchflags(argv[0], 0) != 0)
++ return 1;
++ return 0;
++}
++]])],[ac_cv_have_lchflags=yes],[ac_cv_have_lchflags=no],[ac_cv_have_lchflags=cross])
++])
++if test "$ac_cv_have_lchflags" = cross ; then
++ AC_CHECK_FUNC([lchflags], [ac_cv_have_lchflags="yes"], [ac_cv_have_lchflags="no"])
++fi
++if test "$ac_cv_have_lchflags" = yes ; then
++ AC_DEFINE(HAVE_LCHFLAGS, 1, [Define to 1 if you have the 'lchflags' function.])
++fi
++
++dnl Check if system zlib has *Copy() functions
++dnl
++dnl On MacOSX the linker will search for dylibs on the entire linker path
++dnl before searching for static libraries. setup.py adds -Wl,-search_paths_first
++dnl to revert to a more traditional unix behaviour and make it possible to
++dnl override the system libz with a local static library of libz. Temporarily
++dnl add that flag to our CFLAGS as well to ensure that we check the version
++dnl of libz that will be used by setup.py.
++dnl The -L/usr/local/lib is needed as well to get the same compilation
++dnl environment as setup.py (and leaving it out can cause configure to use the
++dnl wrong version of the library).
++case $ac_sys_system/$ac_sys_release in
++Darwin/*)
++ _CUR_CFLAGS="${CFLAGS}"
++ _CUR_LDFLAGS="${LDFLAGS}"
++ CFLAGS="${CFLAGS} -Wl,-search_paths_first"
++ LDFLAGS="${LDFLAGS} -Wl,-search_paths_first -L/usr/local/lib"
++ ;;
++esac
++
++AC_CHECK_LIB(z, inflateCopy, AC_DEFINE(HAVE_ZLIB_COPY, 1, [Define if the zlib library has inflateCopy]))
++
++case $ac_sys_system/$ac_sys_release in
++Darwin/*)
++ CFLAGS="${_CUR_CFLAGS}"
++ LDFLAGS="${_CUR_LDFLAGS}"
++ ;;
++esac
++
++AC_MSG_CHECKING(for hstrerror)
++AC_LINK_IFELSE([AC_LANG_PROGRAM([[
++#include <netdb.h>
++]], [[void* p = hstrerror; hstrerror(0)]])],
++ [AC_DEFINE(HAVE_HSTRERROR, 1, Define if you have the 'hstrerror' function.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)
++])
++
++AC_MSG_CHECKING(for inet_aton)
++AC_LINK_IFELSE([AC_LANG_PROGRAM([[
++#include <sys/types.h>
++#include <sys/socket.h>
++#include <netinet/in.h>
++#include <arpa/inet.h>
++]], [[void* p = inet_aton;inet_aton(0,0)]])],
++ [AC_DEFINE(HAVE_INET_ATON, 1, Define if you have the 'inet_aton' function.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)
++])
++
++AC_MSG_CHECKING(for inet_pton)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++#include <sys/types.h>
++#include <sys/socket.h>
++#include <netinet/in.h>
++#include <arpa/inet.h>
++]], [[void* p = inet_pton]])],
++ [AC_DEFINE(HAVE_INET_PTON, 1, Define if you have the 'inet_pton' function.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)
++])
++
++# On some systems, setgroups is in unistd.h, on others, in grp.h
++AC_MSG_CHECKING(for setgroups)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++#include <unistd.h>
++#ifdef HAVE_GRP_H
++#include <grp.h>
++#endif
++]], [[void* p = setgroups]])],
++ [AC_DEFINE(HAVE_SETGROUPS, 1, Define if you have the 'setgroups' function.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)
++])
++
++# check for openpty and forkpty
++
++AC_CHECK_FUNCS(openpty,,
++ AC_CHECK_LIB(util,openpty,
++ [AC_DEFINE(HAVE_OPENPTY) LIBS="$LIBS -lutil"],
++ AC_CHECK_LIB(bsd,openpty, [AC_DEFINE(HAVE_OPENPTY) LIBS="$LIBS -lbsd"])
++ )
++)
++AC_CHECK_FUNCS(forkpty,,
++ AC_CHECK_LIB(util,forkpty,
++ [AC_DEFINE(HAVE_FORKPTY) LIBS="$LIBS -lutil"],
++ AC_CHECK_LIB(bsd,forkpty, [AC_DEFINE(HAVE_FORKPTY) LIBS="$LIBS -lbsd"])
++ )
++)
++
++# Stuff for expat.
++AC_CHECK_FUNCS(memmove)
++
++# check for long file support functions
++AC_CHECK_FUNCS(fseek64 fseeko fstatvfs ftell64 ftello statvfs)
++
++AC_REPLACE_FUNCS(dup2 getcwd strdup)
++AC_CHECK_FUNCS(getpgrp,
++ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[getpgrp(0);]])],
++ [AC_DEFINE(GETPGRP_HAVE_ARG, 1, [Define if getpgrp() must be called as getpgrp(0).])],
++ [])
++)
++AC_CHECK_FUNCS(setpgrp,
++ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[setpgrp(0,0);]])],
++ [AC_DEFINE(SETPGRP_HAVE_ARG, 1, [Define if setpgrp() must be called as setpgrp(0, 0).])],
++ [])
++)
++AC_CHECK_FUNCS(gettimeofday,
++ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/time.h>]],
++ [[gettimeofday((struct timeval*)0,(struct timezone*)0);]])],
++ [],
++ [AC_DEFINE(GETTIMEOFDAY_NO_TZ, 1,
++ [Define if gettimeofday() does not have second (timezone) argument
++ This is the case on Motorola V4 (R40V4.2)])
++ ])
++)
++
++AC_MSG_CHECKING(for major, minor, and makedev)
++AC_LINK_IFELSE([AC_LANG_PROGRAM([[
++#if defined(MAJOR_IN_MKDEV)
++#include <sys/mkdev.h>
++#elif defined(MAJOR_IN_SYSMACROS)
++#include <sys/sysmacros.h>
++#else
++#include <sys/types.h>
++#endif
++]], [[
++ makedev(major(0),minor(0));
++]])],[
++ AC_DEFINE(HAVE_DEVICE_MACROS, 1,
++ [Define to 1 if you have the device macros.])
++ AC_MSG_RESULT(yes)
++],[
++ AC_MSG_RESULT(no)
++])
++
++# On OSF/1 V5.1, getaddrinfo is available, but there is a define
++# for [no]getaddrinfo in netdb.h.
++AC_MSG_CHECKING(for getaddrinfo)
++AC_LINK_IFELSE([AC_LANG_PROGRAM([[
++#include <sys/types.h>
++#include <sys/socket.h>
++#include <netdb.h>
++#include <stdio.h>
++]], [[getaddrinfo(NULL, NULL, NULL, NULL);]])],
++[have_getaddrinfo=yes],
++[have_getaddrinfo=no])
++AC_MSG_RESULT($have_getaddrinfo)
++if test $have_getaddrinfo = yes
++then
++ AC_MSG_CHECKING(getaddrinfo bug)
++ AC_CACHE_VAL(ac_cv_buggy_getaddrinfo,
++ AC_RUN_IFELSE([AC_LANG_SOURCE([[[
++#include <sys/types.h>
++#include <netdb.h>
++#include <string.h>
++#include <sys/socket.h>
++#include <netinet/in.h>
++
++int main()
++{
++ int passive, gaierr, inet4 = 0, inet6 = 0;
++ struct addrinfo hints, *ai, *aitop;
++ char straddr[INET6_ADDRSTRLEN], strport[16];
++
++ for (passive = 0; passive <= 1; passive++) {
++ memset(&hints, 0, sizeof(hints));
++ hints.ai_family = AF_UNSPEC;
++ hints.ai_flags = passive ? AI_PASSIVE : 0;
++ hints.ai_socktype = SOCK_STREAM;
++ hints.ai_protocol = IPPROTO_TCP;
++ if ((gaierr = getaddrinfo(NULL, "54321", &hints, &aitop)) != 0) {
++ (void)gai_strerror(gaierr);
++ goto bad;
++ }
++ for (ai = aitop; ai; ai = ai->ai_next) {
++ if (ai->ai_addr == NULL ||
++ ai->ai_addrlen == 0 ||
++ getnameinfo(ai->ai_addr, ai->ai_addrlen,
++ straddr, sizeof(straddr), strport, sizeof(strport),
++ NI_NUMERICHOST|NI_NUMERICSERV) != 0) {
++ goto bad;
++ }
++ switch (ai->ai_family) {
++ case AF_INET:
++ if (strcmp(strport, "54321") != 0) {
++ goto bad;
++ }
++ if (passive) {
++ if (strcmp(straddr, "0.0.0.0") != 0) {
++ goto bad;
++ }
++ } else {
++ if (strcmp(straddr, "127.0.0.1") != 0) {
++ goto bad;
++ }
++ }
++ inet4++;
++ break;
++ case AF_INET6:
++ if (strcmp(strport, "54321") != 0) {
++ goto bad;
++ }
++ if (passive) {
++ if (strcmp(straddr, "::") != 0) {
++ goto bad;
++ }
++ } else {
++ if (strcmp(straddr, "::1") != 0) {
++ goto bad;
++ }
++ }
++ inet6++;
++ break;
++ case AF_UNSPEC:
++ goto bad;
++ break;
++ default:
++ /* another family support? */
++ break;
++ }
++ }
++ }
++
++ if (!(inet4 == 0 || inet4 == 2))
++ goto bad;
++ if (!(inet6 == 0 || inet6 == 2))
++ goto bad;
++
++ if (aitop)
++ freeaddrinfo(aitop);
++ return 0;
++
++ bad:
++ if (aitop)
++ freeaddrinfo(aitop);
++ return 1;
++}
++]]])],
++[ac_cv_buggy_getaddrinfo=no],
++[ac_cv_buggy_getaddrinfo=yes],
++[ac_cv_buggy_getaddrinfo=yes]))
++fi
++
++AC_MSG_RESULT($ac_cv_buggy_getaddrinfo)
++
++if test $have_getaddrinfo = no -o "$ac_cv_buggy_getaddrinfo" = yes
++then
++ if test $ipv6 = yes
++ then
++	echo 'Fatal: You must get a working getaddrinfo() function.'
++	echo '       or you can specify "--disable-ipv6".'
++ exit 1
++ fi
++else
++ AC_DEFINE(HAVE_GETADDRINFO, 1, [Define if you have the getaddrinfo function.])
++fi
++
++AC_CHECK_FUNCS(getnameinfo)
++
++# checks for structures
++AC_HEADER_TIME
++AC_STRUCT_TM
++AC_STRUCT_TIMEZONE
++AC_CHECK_MEMBERS([struct stat.st_rdev])
++AC_CHECK_MEMBERS([struct stat.st_blksize])
++AC_CHECK_MEMBERS([struct stat.st_flags])
++AC_CHECK_MEMBERS([struct stat.st_gen])
++AC_CHECK_MEMBERS([struct stat.st_birthtime])
++AC_STRUCT_ST_BLOCKS
++
++AC_MSG_CHECKING(for time.h that defines altzone)
++AC_CACHE_VAL(ac_cv_header_time_altzone,[
++ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <time.h>]], [[return altzone;]])],
++ [ac_cv_header_time_altzone=yes],
++ [ac_cv_header_time_altzone=no])
++ ])
++AC_MSG_RESULT($ac_cv_header_time_altzone)
++if test $ac_cv_header_time_altzone = yes; then
++ AC_DEFINE(HAVE_ALTZONE, 1, [Define this if your time.h defines altzone.])
++fi
++
++was_it_defined=no
++AC_MSG_CHECKING(whether sys/select.h and sys/time.h may both be included)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++#include <sys/types.h>
++#include <sys/select.h>
++#include <sys/time.h>
++]], [[;]])],[
++ AC_DEFINE(SYS_SELECT_WITH_SYS_TIME, 1,
++ [Define if you can safely include both <sys/select.h> and <sys/time.h>
++ (which you can't on SCO ODT 3.0).])
++ was_it_defined=yes
++],[])
++AC_MSG_RESULT($was_it_defined)
++
++AC_MSG_CHECKING(for addrinfo)
++AC_CACHE_VAL(ac_cv_struct_addrinfo,
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <netdb.h>]], [[struct addrinfo a]])],
++ [ac_cv_struct_addrinfo=yes],
++ [ac_cv_struct_addrinfo=no]))
++AC_MSG_RESULT($ac_cv_struct_addrinfo)
++if test $ac_cv_struct_addrinfo = yes; then
++ AC_DEFINE(HAVE_ADDRINFO, 1, [struct addrinfo (netdb.h)])
++fi
++
++AC_MSG_CHECKING(for sockaddr_storage)
++AC_CACHE_VAL(ac_cv_struct_sockaddr_storage,
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++# include <sys/types.h>
++# include <sys/socket.h>]], [[struct sockaddr_storage s]])],
++ [ac_cv_struct_sockaddr_storage=yes],
++ [ac_cv_struct_sockaddr_storage=no]))
++AC_MSG_RESULT($ac_cv_struct_sockaddr_storage)
++if test $ac_cv_struct_sockaddr_storage = yes; then
++ AC_DEFINE(HAVE_SOCKADDR_STORAGE, 1, [struct sockaddr_storage (sys/socket.h)])
++fi
++
++# checks for compiler characteristics
++
++AC_C_CHAR_UNSIGNED
++AC_C_CONST
++
++works=no
++AC_MSG_CHECKING(for working volatile)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[volatile int x; x = 0;]])],
++ [works=yes],
++ [AC_DEFINE(volatile, , [Define to empty if the keyword does not work.])]
++)
++AC_MSG_RESULT($works)
++
++works=no
++AC_MSG_CHECKING(for working signed char)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[signed char c;]])],
++ [works=yes],
++ [AC_DEFINE(signed, , [Define to empty if the keyword does not work.])]
++)
++AC_MSG_RESULT($works)
++
++have_prototypes=no
++AC_MSG_CHECKING(for prototypes)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[int foo(int x) { return 0; }]], [[return foo(10);]])],
++ [AC_DEFINE(HAVE_PROTOTYPES, 1,
++ [Define if your compiler supports function prototype])
++ have_prototypes=yes],
++ []
++)
++AC_MSG_RESULT($have_prototypes)
++
++works=no
++AC_MSG_CHECKING(for variable length prototypes and stdarg.h)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++#include <stdarg.h>
++int foo(int x, ...) {
++ va_list va;
++ va_start(va, x);
++ va_arg(va, int);
++ va_arg(va, char *);
++ va_arg(va, double);
++ return 0;
++}
++]], [[return foo(10, "", 3.14);]])],[
++ AC_DEFINE(HAVE_STDARG_PROTOTYPES, 1,
++ [Define if your compiler supports variable length function prototypes
++ (e.g. void fprintf(FILE *, char *, ...);) *and* <stdarg.h>])
++ works=yes
++],[])
++AC_MSG_RESULT($works)
++
++# check for socketpair
++AC_MSG_CHECKING(for socketpair)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++#include <sys/types.h>
++#include <sys/socket.h>
++]], [[void *x=socketpair]])],
++ [AC_DEFINE(HAVE_SOCKETPAIR, 1, [Define if you have the 'socketpair' function.])
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)]
++)
++
++# check if sockaddr has sa_len member
++AC_MSG_CHECKING(if sockaddr has sa_len member)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/types.h>
++#include <sys/socket.h>]], [[struct sockaddr x;
++x.sa_len = 0;]])],
++ [AC_MSG_RESULT(yes)
++ AC_DEFINE(HAVE_SOCKADDR_SA_LEN, 1, [Define if sockaddr has sa_len member])],
++ [AC_MSG_RESULT(no)]
++)
++
++va_list_is_array=no
++AC_MSG_CHECKING(whether va_list is an array)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++#ifdef HAVE_STDARG_PROTOTYPES
++#include <stdarg.h>
++#else
++#include <varargs.h>
++#endif
++]], [[va_list list1, list2; list1 = list2;]])],[],[
++ AC_DEFINE(VA_LIST_IS_ARRAY, 1, [Define if a va_list is an array of some kind])
++ va_list_is_array=yes
++])
++AC_MSG_RESULT($va_list_is_array)
++
++# sigh -- gethostbyname_r is a mess; it can have 3, 5 or 6 arguments :-(
++AH_TEMPLATE(HAVE_GETHOSTBYNAME_R,
++ [Define this if you have some version of gethostbyname_r()])
++
++AC_CHECK_FUNC(gethostbyname_r, [
++ AC_DEFINE(HAVE_GETHOSTBYNAME_R)
++ AC_MSG_CHECKING([gethostbyname_r with 6 args])
++ OLD_CFLAGS=$CFLAGS
++ CFLAGS="$CFLAGS $MY_CPPFLAGS $MY_THREAD_CPPFLAGS $MY_CFLAGS"
++ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++# include <netdb.h>
++ ]], [[
++ char *name;
++ struct hostent *he, *res;
++ char buffer[2048];
++ int buflen = 2048;
++ int h_errnop;
++
++ (void) gethostbyname_r(name, he, buffer, buflen, &res, &h_errnop)
++ ]])],[
++ AC_DEFINE(HAVE_GETHOSTBYNAME_R)
++ AC_DEFINE(HAVE_GETHOSTBYNAME_R_6_ARG, 1,
++ [Define this if you have the 6-arg version of gethostbyname_r().])
++ AC_MSG_RESULT(yes)
++ ],[
++ AC_MSG_RESULT(no)
++ AC_MSG_CHECKING([gethostbyname_r with 5 args])
++ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++# include <netdb.h>
++ ]], [[
++ char *name;
++ struct hostent *he;
++ char buffer[2048];
++ int buflen = 2048;
++ int h_errnop;
++
++ (void) gethostbyname_r(name, he, buffer, buflen, &h_errnop)
++ ]])],
++ [
++ AC_DEFINE(HAVE_GETHOSTBYNAME_R)
++ AC_DEFINE(HAVE_GETHOSTBYNAME_R_5_ARG, 1,
++ [Define this if you have the 5-arg version of gethostbyname_r().])
++ AC_MSG_RESULT(yes)
++ ], [
++ AC_MSG_RESULT(no)
++ AC_MSG_CHECKING([gethostbyname_r with 3 args])
++ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++# include <netdb.h>
++ ]], [[
++ char *name;
++ struct hostent *he;
++ struct hostent_data data;
++
++ (void) gethostbyname_r(name, he, &data);
++ ]])],
++ [
++ AC_DEFINE(HAVE_GETHOSTBYNAME_R)
++ AC_DEFINE(HAVE_GETHOSTBYNAME_R_3_ARG, 1,
++ [Define this if you have the 3-arg version of gethostbyname_r().])
++ AC_MSG_RESULT(yes)
++ ], [
++ AC_MSG_RESULT(no)
++ ])
++ ])
++ ])
++ CFLAGS=$OLD_CFLAGS
++], [
++ AC_CHECK_FUNCS(gethostbyname)
++])
++AC_SUBST(HAVE_GETHOSTBYNAME_R_6_ARG)
++AC_SUBST(HAVE_GETHOSTBYNAME_R_5_ARG)
++AC_SUBST(HAVE_GETHOSTBYNAME_R_3_ARG)
++AC_SUBST(HAVE_GETHOSTBYNAME_R)
++AC_SUBST(HAVE_GETHOSTBYNAME)
++
++# checks for system services
++# (none yet)
++
++# Linux requires this for correct f.p. operations
++AC_CHECK_FUNC(__fpu_control,
++ [],
++ [AC_CHECK_LIB(ieee, __fpu_control)
++])
++
++# Check for --with-fpectl
++AC_MSG_CHECKING(for --with-fpectl)
++AC_ARG_WITH(fpectl,
++ AS_HELP_STRING([--with-fpectl], [enable SIGFPE catching]),
++[
++if test "$withval" != no
++then
++ AC_DEFINE(WANT_SIGFPE_HANDLER, 1,
++ [Define if you want SIGFPE handled (see Include/pyfpe.h).])
++ AC_MSG_RESULT(yes)
++else AC_MSG_RESULT(no)
++fi],
++[AC_MSG_RESULT(no)])
++
++# check for --with-libm=...
++AC_SUBST(LIBM)
++case $ac_sys_system in
++Darwin) ;;
++BeOS) ;;
++*) LIBM=-lm
++esac
++AC_MSG_CHECKING(for --with-libm=STRING)
++AC_ARG_WITH(libm,
++ AS_HELP_STRING([--with-libm=STRING], [math library]),
++[
++if test "$withval" = no
++then LIBM=
++ AC_MSG_RESULT(force LIBM empty)
++elif test "$withval" != yes
++then LIBM=$withval
++ AC_MSG_RESULT(set LIBM="$withval")
++else AC_MSG_ERROR([proper usage is --with-libm=STRING])
++fi],
++[AC_MSG_RESULT(default LIBM="$LIBM")])
++
++# check for --with-libc=...
++AC_SUBST(LIBC)
++AC_MSG_CHECKING(for --with-libc=STRING)
++AC_ARG_WITH(libc,
++ AS_HELP_STRING([--with-libc=STRING], [C library]),
++[
++if test "$withval" = no
++then LIBC=
++ AC_MSG_RESULT(force LIBC empty)
++elif test "$withval" != yes
++then LIBC=$withval
++ AC_MSG_RESULT(set LIBC="$withval")
++else AC_MSG_ERROR([proper usage is --with-libc=STRING])
++fi],
++[AC_MSG_RESULT(default LIBC="$LIBC")])
++
++# **************************************************
++# * Check for various properties of floating point *
++# **************************************************
++
++AC_MSG_CHECKING(whether C doubles are little-endian IEEE 754 binary64)
++AC_CACHE_VAL(ac_cv_little_endian_double, [
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <string.h>
++int main() {
++ double x = 9006104071832581.0;
++ if (memcmp(&x, "\x05\x04\x03\x02\x01\xff\x3f\x43", 8) == 0)
++ return 0;
++ else
++ return 1;
++}
++]])],
++[ac_cv_little_endian_double=yes],
++[ac_cv_little_endian_double=no],
++[ac_cv_little_endian_double=no])])
++AC_MSG_RESULT($ac_cv_little_endian_double)
++if test "$ac_cv_little_endian_double" = yes
++then
++ AC_DEFINE(DOUBLE_IS_LITTLE_ENDIAN_IEEE754, 1,
++ [Define if C doubles are 64-bit IEEE 754 binary format, stored
++ with the least significant byte first])
++fi
++
++AC_MSG_CHECKING(whether C doubles are big-endian IEEE 754 binary64)
++AC_CACHE_VAL(ac_cv_big_endian_double, [
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <string.h>
++int main() {
++ double x = 9006104071832581.0;
++ if (memcmp(&x, "\x43\x3f\xff\x01\x02\x03\x04\x05", 8) == 0)
++ return 0;
++ else
++ return 1;
++}
++]])],
++[ac_cv_big_endian_double=yes],
++[ac_cv_big_endian_double=no],
++[ac_cv_big_endian_double=no])])
++AC_MSG_RESULT($ac_cv_big_endian_double)
++if test "$ac_cv_big_endian_double" = yes
++then
++ AC_DEFINE(DOUBLE_IS_BIG_ENDIAN_IEEE754, 1,
++ [Define if C doubles are 64-bit IEEE 754 binary format, stored
++ with the most significant byte first])
++fi
++
++# Some ARM platforms use a mixed-endian representation for doubles.
++# While Python doesn't currently have full support for these platforms
++# (see e.g., issue 1762561), we can at least make sure that float <-> string
++# conversions work.
++AC_MSG_CHECKING(whether C doubles are ARM mixed-endian IEEE 754 binary64)
++AC_CACHE_VAL(ac_cv_mixed_endian_double, [
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <string.h>
++int main() {
++ double x = 9006104071832581.0;
++ if (memcmp(&x, "\x01\xff\x3f\x43\x05\x04\x03\x02", 8) == 0)
++ return 0;
++ else
++ return 1;
++}
++]])],
++[ac_cv_mixed_endian_double=yes],
++[ac_cv_mixed_endian_double=no],
++[ac_cv_mixed_endian_double=no])])
++AC_MSG_RESULT($ac_cv_mixed_endian_double)
++if test "$ac_cv_mixed_endian_double" = yes
++then
++ AC_DEFINE(DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754, 1,
++ [Define if C doubles are 64-bit IEEE 754 binary format, stored
++ in ARM mixed-endian order (byte order 45670123)])
++fi
++
++# The short float repr introduced in Python 3.1 requires the
++# correctly-rounded string <-> double conversion functions from
++# Python/dtoa.c, which in turn require that the FPU uses 53-bit
++# rounding; this is a problem on x86, where the x87 FPU has a default
++# rounding precision of 64 bits. For gcc/x86, we can fix this by
++# using inline assembler to get and set the x87 FPU control word.
++
++# This inline assembler syntax may also work for suncc and icc,
++# so we try it on all platforms.
++
++AC_MSG_CHECKING(whether we can use gcc inline assembler to get and set x87 control word)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[
++ unsigned short cw;
++ __asm__ __volatile__ ("fnstcw %0" : "=m" (cw));
++ __asm__ __volatile__ ("fldcw %0" : : "m" (cw));
++]])],[have_gcc_asm_for_x87=yes],[have_gcc_asm_for_x87=no])
++AC_MSG_RESULT($have_gcc_asm_for_x87)
++if test "$have_gcc_asm_for_x87" = yes
++then
++ AC_DEFINE(HAVE_GCC_ASM_FOR_X87, 1,
++ [Define if we can use gcc inline assembler to get and set x87 control word])
++fi
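++# Illustrative sketch only (an assumption, not used by configure): code that
++# relies on HAVE_GCC_ASM_FOR_X87 could force 53-bit rounding by rewriting the
++# precision-control field (bits 8-9) of the x87 control word, roughly:
++#   unsigned short cw;
++#   __asm__ __volatile__ ("fnstcw %0" : "=m" (cw));
++#   cw = (cw & ~0x0300) | 0x0200;  /* PC = 10b: round results to 53 bits */
++#   __asm__ __volatile__ ("fldcw %0" : : "m" (cw));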
++
++# Detect whether system arithmetic is subject to x87-style double
++# rounding issues. The result of this test has little meaning on
++# non-IEEE 754 platforms. On IEEE 754 platforms, the test should return 1
++# if the rounding mode is round-to-nearest and double rounding issues are
++# present, and 0 otherwise. See http://bugs.python.org/issue2937 for more info.
++AC_MSG_CHECKING(for x87-style double rounding)
++# $BASECFLAGS may affect the result
++ac_save_cc="$CC"
++CC="$CC $BASECFLAGS"
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <stdlib.h>
++#include <math.h>
++int main() {
++ volatile double x, y, z;
++ /* 1./(1-2**-53) -> 1+2**-52 (correct), 1.0 (double rounding) */
++ x = 0.99999999999999989; /* 1-2**-53 */
++ y = 1./x;
++ if (y != 1.)
++ exit(0);
++ /* 1e16+2.99999 -> 1e16+2. (correct), 1e16+4. (double rounding) */
++ x = 1e16;
++ y = 2.99999;
++ z = x + y;
++ if (z != 1e16+4.)
++ exit(0);
++ /* both tests show evidence of double rounding */
++ exit(1);
++}
++]])],
++[ac_cv_x87_double_rounding=no],
++[ac_cv_x87_double_rounding=yes],
++[ac_cv_x87_double_rounding=no])
++CC="$ac_save_cc"
++AC_MSG_RESULT($ac_cv_x87_double_rounding)
++if test "$ac_cv_x87_double_rounding" = yes
++then
++ AC_DEFINE(X87_DOUBLE_ROUNDING, 1,
++ [Define if arithmetic is subject to x87-style double rounding issue])
++fi
++
++# ************************************
++# * Check for mathematical functions *
++# ************************************
++
++LIBS_SAVE=$LIBS
++LIBS="$LIBS $LIBM"
++
++# On FreeBSD 6.2, it appears that tanh(-0.) returns 0. instead of
++# -0. on some architectures.
++AC_MSG_CHECKING(whether tanh preserves the sign of zero)
++AC_CACHE_VAL(ac_cv_tanh_preserves_zero_sign, [
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <math.h>
++#include <stdlib.h>
++int main() {
++ /* return 0 if either negative zeros don't exist
++ on this platform or if negative zeros exist
++ and tanh(-0.) == -0. */
++ if (atan2(0., -1.) == atan2(-0., -1.) ||
++ atan2(tanh(-0.), -1.) == atan2(-0., -1.)) exit(0);
++ else exit(1);
++}
++]])],
++[ac_cv_tanh_preserves_zero_sign=yes],
++[ac_cv_tanh_preserves_zero_sign=no],
++[ac_cv_tanh_preserves_zero_sign=no])])
++AC_MSG_RESULT($ac_cv_tanh_preserves_zero_sign)
++if test "$ac_cv_tanh_preserves_zero_sign" = yes
++then
++ AC_DEFINE(TANH_PRESERVES_ZERO_SIGN, 1,
++ [Define if tanh(-0.) is -0., or if platform doesn't have signed zeros])
++fi
++
++AC_CHECK_FUNCS([acosh asinh atanh copysign erf erfc expm1 finite gamma])
++AC_CHECK_FUNCS([hypot lgamma log1p round tgamma])
++AC_CHECK_DECLS([isinf, isnan, isfinite], [], [], [[#include <math.h>]])
++
++LIBS=$LIBS_SAVE
++
++# For multiprocessing module, check that sem_open
++# actually works. For FreeBSD versions <= 7.2,
++# the kernel module that provides POSIX semaphores
++# isn't loaded by default, so an attempt to call
++# sem_open results in a 'Signal 12' error.
++AC_MSG_CHECKING(whether POSIX semaphores are enabled)
++AC_CACHE_VAL(ac_cv_posix_semaphores_enabled,
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <unistd.h>
++#include <fcntl.h>
++#include <stdio.h>
++#include <semaphore.h>
++#include <sys/stat.h>
++
++int main(void) {
++ sem_t *a = sem_open("/autoconf", O_CREAT, S_IRUSR|S_IWUSR, 0);
++ if (a == SEM_FAILED) {
++ perror("sem_open");
++ return 1;
++ }
++ sem_close(a);
++ sem_unlink("/autoconf");
++ return 0;
++}
++]])],
++[ac_cv_posix_semaphores_enabled=yes],
++[ac_cv_posix_semaphores_enabled=no],
++[ac_cv_posix_semaphores_enabled=yes])
++)
++AC_MSG_RESULT($ac_cv_posix_semaphores_enabled)
++if test $ac_cv_posix_semaphores_enabled = no
++then
++ AC_DEFINE(POSIX_SEMAPHORES_NOT_ENABLED, 1,
++ [Define if POSIX semaphores aren't enabled on your system])
++fi
++
++# Multiprocessing check for broken sem_getvalue
++AC_MSG_CHECKING(for broken sem_getvalue)
++AC_CACHE_VAL(ac_cv_broken_sem_getvalue,
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <unistd.h>
++#include <fcntl.h>
++#include <stdio.h>
++#include <semaphore.h>
++#include <sys/stat.h>
++
++int main(void){
++ sem_t *a = sem_open("/autocftw", O_CREAT, S_IRUSR|S_IWUSR, 0);
++ int count;
++ int res;
++ if(a==SEM_FAILED){
++ perror("sem_open");
++ return 1;
++
++ }
++ res = sem_getvalue(a, &count);
++ sem_close(a);
++ sem_unlink("/autocftw");
++ return res==-1 ? 1 : 0;
++}
++]])],
++[ac_cv_broken_sem_getvalue=no],
++[ac_cv_broken_sem_getvalue=yes],
++[ac_cv_broken_sem_getvalue=yes])
++)
++AC_MSG_RESULT($ac_cv_broken_sem_getvalue)
++if test $ac_cv_broken_sem_getvalue = yes
++then
++ AC_DEFINE(HAVE_BROKEN_SEM_GETVALUE, 1,
++ [define to 1 if your sem_getvalue is broken.])
++fi
++
++# determine what size digit to use for Python's longs
++AC_MSG_CHECKING([digit size for Python's longs])
++AC_ARG_ENABLE(big-digits,
++AS_HELP_STRING([--enable-big-digits@<:@=BITS@:>@],[use big digits for Python longs [[BITS=30]]]),
++[case $enable_big_digits in
++yes)
++ enable_big_digits=30 ;;
++no)
++ enable_big_digits=15 ;;
++[15|30])
++ ;;
++*)
++ AC_MSG_ERROR([bad value $enable_big_digits for --enable-big-digits; value should be 15 or 30]) ;;
++esac
++AC_MSG_RESULT($enable_big_digits)
++AC_DEFINE_UNQUOTED(PYLONG_BITS_IN_DIGIT, $enable_big_digits, [Define as the preferred size in bits of long digits])
++],
++[AC_MSG_RESULT(no value specified)])
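++# For illustration: with 30-bit digits each digit holds a value in [0, 2**30),
++# so e.g. 2**64 needs three digits; with 15-bit digits the same value needs five.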
++
++# check for wchar.h
++AC_CHECK_HEADER(wchar.h, [
++ AC_DEFINE(HAVE_WCHAR_H, 1,
++ [Define if the compiler provides a wchar.h header file.])
++ wchar_h="yes"
++],
++wchar_h="no"
++)
++
++# determine wchar_t size
++if test "$wchar_h" = yes
++then
++ AC_CHECK_SIZEOF(wchar_t, 4, [#include <wchar.h>])
++fi
++
++AC_MSG_CHECKING(for UCS-4 tcl)
++have_ucs4_tcl=no
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
++#include <tcl.h>
++#if TCL_UTF_MAX != 6
++# error "NOT UCS4_TCL"
++#endif]], [[]])],[
++ AC_DEFINE(HAVE_UCS4_TCL, 1, [Define this if you have tcl and TCL_UTF_MAX==6])
++ have_ucs4_tcl=yes
++],[])
++AC_MSG_RESULT($have_ucs4_tcl)
++
++# check whether wchar_t is signed or not
++if test "$wchar_h" = yes
++then
++ # check whether wchar_t is signed or not
++ AC_MSG_CHECKING(whether wchar_t is signed)
++ AC_CACHE_VAL(ac_cv_wchar_t_signed, [
++ AC_RUN_IFELSE([AC_LANG_SOURCE([[
++ #include <wchar.h>
++ int main()
++ {
++ /* Success: exit code 0 */
++ exit((((wchar_t) -1) < ((wchar_t) 0)) ? 0 : 1);
++ }
++ ]])],
++ [ac_cv_wchar_t_signed=yes],
++ [ac_cv_wchar_t_signed=no],
++ [ac_cv_wchar_t_signed=yes])])
++ AC_MSG_RESULT($ac_cv_wchar_t_signed)
++fi
++
++AC_MSG_CHECKING(what type to use for unicode)
++dnl quadrigraphs "@<:@" and "@:>@" produce "[" and "]" in the output
++AC_ARG_ENABLE(unicode,
++ AS_HELP_STRING([--enable-unicode@<:@=ucs@<:@24@:>@@:>@], [Enable Unicode strings (default is ucs2)]),
++ [],
++ [enable_unicode=yes])
++
++if test $enable_unicode = yes
++then
++ # Without any arguments, Py_UNICODE defaults to two-byte mode
++ case "$have_ucs4_tcl" in
++ yes) enable_unicode="ucs4"
++ ;;
++ *) enable_unicode="ucs2"
++ ;;
++ esac
++fi
++
++AH_TEMPLATE(Py_UNICODE_SIZE,
++ [Define as the size of the unicode type.])
++case "$enable_unicode" in
++ucs2) unicode_size="2"
++ AC_DEFINE(Py_UNICODE_SIZE,2)
++ ;;
++ucs4) unicode_size="4"
++ AC_DEFINE(Py_UNICODE_SIZE,4)
++ ;;
++no) ;; # To allow --disable-unicode
++*) AC_MSG_ERROR([invalid value for --enable-unicode. Use either ucs2 or ucs4 (lowercase).]) ;;
++esac
++
++AH_TEMPLATE(PY_UNICODE_TYPE,
++ [Define as the integral type used for Unicode representation.])
++
++AC_SUBST(UNICODE_OBJS)
++if test "$enable_unicode" = "no"
++then
++ UNICODE_OBJS=""
++ AC_MSG_RESULT(not used)
++else
++ UNICODE_OBJS="Objects/unicodeobject.o Objects/unicodectype.o"
++ AC_DEFINE(Py_USING_UNICODE, 1,
++ [Define if you want to have a Unicode type.])
++
++ # wchar_t is only usable if it maps to an unsigned type
++ if test "$unicode_size" = "$ac_cv_sizeof_wchar_t" \
++ -a "$ac_cv_wchar_t_signed" = "no"
++ then
++ PY_UNICODE_TYPE="wchar_t"
++ AC_DEFINE(HAVE_USABLE_WCHAR_T, 1,
++ [Define if you have a useable wchar_t type defined in wchar.h; useable
++ means wchar_t must be an unsigned type with at least 16 bits. (see
++ Include/unicodeobject.h).])
++ AC_DEFINE(PY_UNICODE_TYPE,wchar_t)
++ elif test "$ac_cv_sizeof_short" = "$unicode_size"
++ then
++ PY_UNICODE_TYPE="unsigned short"
++ AC_DEFINE(PY_UNICODE_TYPE,unsigned short)
++ elif test "$ac_cv_sizeof_long" = "$unicode_size"
++ then
++ PY_UNICODE_TYPE="unsigned long"
++ AC_DEFINE(PY_UNICODE_TYPE,unsigned long)
++ else
++ PY_UNICODE_TYPE="no type found"
++ fi
++ AC_MSG_RESULT($PY_UNICODE_TYPE)
++fi
++
++# check for endianness
++AC_C_BIGENDIAN
++
++# Check whether right shifting a negative integer extends the sign bit
++# or fills with zeros (like the Cray J90, according to Tim Peters).
++AC_MSG_CHECKING(whether right shift extends the sign bit)
++AC_CACHE_VAL(ac_cv_rshift_extends_sign, [
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++int main()
++{
++ exit(((-1)>>3 == -1) ? 0 : 1);
++}
++]])],
++[ac_cv_rshift_extends_sign=yes],
++[ac_cv_rshift_extends_sign=no],
++[ac_cv_rshift_extends_sign=yes])])
++AC_MSG_RESULT($ac_cv_rshift_extends_sign)
++if test "$ac_cv_rshift_extends_sign" = no
++then
++ AC_DEFINE(SIGNED_RIGHT_SHIFT_ZERO_FILLS, 1,
++ [Define if i>>j for signed int i does not extend the sign bit
++ when i < 0])
++fi
++
++# check for getc_unlocked and related locking functions
++AC_MSG_CHECKING(for getc_unlocked() and friends)
++AC_CACHE_VAL(ac_cv_have_getc_unlocked, [
++AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <stdio.h>]], [[
++ FILE *f = fopen("/dev/null", "r");
++ flockfile(f);
++ getc_unlocked(f);
++ funlockfile(f);
++]])],[ac_cv_have_getc_unlocked=yes],[ac_cv_have_getc_unlocked=no])])
++AC_MSG_RESULT($ac_cv_have_getc_unlocked)
++if test "$ac_cv_have_getc_unlocked" = yes
++then
++ AC_DEFINE(HAVE_GETC_UNLOCKED, 1,
++ [Define this if you have flockfile(), getc_unlocked(), and funlockfile()])
++fi
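++# For reference, a minimal sketch (illustration only) of the locked fast path
++# that these three functions make possible:
++#   flockfile(f);
++#   while ((c = getc_unlocked(f)) != EOF) {
++#       /* consume characters without taking the stdio lock per call */
++#   }
++#   funlockfile(f);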
++
++# check where readline lives
++# save the value of LIBS so we don't actually link Python with readline
++LIBS_no_readline=$LIBS
++
++# On some systems we need to link readline to a termcap compatible
++# library. NOTE: Keep the precedence of listed libraries synchronised
++# with setup.py.
++py_cv_lib_readline=no
++AC_MSG_CHECKING([how to link readline libs])
++for py_libtermcap in "" ncursesw ncurses curses termcap; do
++ if test -z "$py_libtermcap"; then
++ READLINE_LIBS="-lreadline"
++ else
++ READLINE_LIBS="-lreadline -l$py_libtermcap"
++ fi
++ LIBS="$READLINE_LIBS $LIBS_no_readline"
++ AC_LINK_IFELSE(
++ [AC_LANG_CALL([],[readline])],
++ [py_cv_lib_readline=yes])
++ if test $py_cv_lib_readline = yes; then
++ break
++ fi
++done
++# Uncomment this line if you want to use READLINE_LIBS in Makefile or scripts
++#AC_SUBST([READLINE_LIBS])
++if test $py_cv_lib_readline = no; then
++ AC_MSG_RESULT([none])
++else
++ AC_MSG_RESULT([$READLINE_LIBS])
++ AC_DEFINE(HAVE_LIBREADLINE, 1,
++ [Define if you have the readline library (-lreadline).])
++fi
++
++# check for readline 2.1
++AC_CHECK_LIB(readline, rl_callback_handler_install,
++ AC_DEFINE(HAVE_RL_CALLBACK, 1,
++ [Define if you have readline 2.1]), ,$READLINE_LIBS)
++
++# check for readline 2.2
++AC_PREPROC_IFELSE([AC_LANG_SOURCE([[#include <readline/readline.h>]])],
++ [have_readline=yes],
++ [have_readline=no]
++)
++if test $have_readline = yes
++then
++ AC_EGREP_HEADER([extern int rl_completion_append_character;],
++ [readline/readline.h],
++ AC_DEFINE(HAVE_RL_COMPLETION_APPEND_CHARACTER, 1,
++ [Define if you have readline 2.2]), )
++ AC_EGREP_HEADER([extern int rl_completion_suppress_append;],
++ [readline/readline.h],
++ AC_DEFINE(HAVE_RL_COMPLETION_SUPPRESS_APPEND, 1,
++ [Define if you have rl_completion_suppress_append]), )
++fi
++
++# check for readline 4.0
++AC_CHECK_LIB(readline, rl_pre_input_hook,
++ AC_DEFINE(HAVE_RL_PRE_INPUT_HOOK, 1,
++ [Define if you have readline 4.0]), ,$READLINE_LIBS)
++
++# also in 4.0
++AC_CHECK_LIB(readline, rl_completion_display_matches_hook,
++ AC_DEFINE(HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK, 1,
++ [Define if you have readline 4.0]), ,$READLINE_LIBS)
++
++# check for readline 4.2
++AC_CHECK_LIB(readline, rl_completion_matches,
++ AC_DEFINE(HAVE_RL_COMPLETION_MATCHES, 1,
++ [Define if you have readline 4.2]), ,$READLINE_LIBS)
++
++# also in readline 4.2
++AC_PREPROC_IFELSE([AC_LANG_SOURCE([[#include <readline/readline.h>]])],
++ [have_readline=yes],
++ [have_readline=no]
++)
++if test $have_readline = yes
++then
++ AC_EGREP_HEADER([extern int rl_catch_signals;],
++ [readline/readline.h],
++ AC_DEFINE(HAVE_RL_CATCH_SIGNAL, 1,
++ [Define if you can turn off readline's signal handling.]), )
++fi
++
++# End of readline checks: restore LIBS
++LIBS=$LIBS_no_readline
++
++AC_MSG_CHECKING(for broken nice())
++AC_CACHE_VAL(ac_cv_broken_nice, [
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++int main()
++{
++ int val1 = nice(1);
++ if (val1 != -1 && val1 == nice(2))
++ exit(0);
++ exit(1);
++}
++]])],
++[ac_cv_broken_nice=yes],
++[ac_cv_broken_nice=no],
++[ac_cv_broken_nice=no])])
++AC_MSG_RESULT($ac_cv_broken_nice)
++if test "$ac_cv_broken_nice" = yes
++then
++ AC_DEFINE(HAVE_BROKEN_NICE, 1,
++ [Define if nice() returns success/failure instead of the new priority.])
++fi
++
++AC_MSG_CHECKING(for broken poll())
++AC_CACHE_VAL(ac_cv_broken_poll,
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <poll.h>
++
++int main()
++{
++ struct pollfd poll_struct = { 42, POLLIN|POLLPRI|POLLOUT, 0 };
++ int poll_test;
++
++ close (42);
++
++ poll_test = poll(&poll_struct, 1, 0);
++ if (poll_test < 0)
++ return 0;
++ else if (poll_test == 0 && poll_struct.revents != POLLNVAL)
++ return 0;
++ else
++ return 1;
++}
++]])],
++[ac_cv_broken_poll=yes],
++[ac_cv_broken_poll=no],
++[ac_cv_broken_poll=no]))
++AC_MSG_RESULT($ac_cv_broken_poll)
++if test "$ac_cv_broken_poll" = yes
++then
++ AC_DEFINE(HAVE_BROKEN_POLL, 1,
++ [Define if poll() sets errno on invalid file descriptors.])
++fi
++
++# Before we can test tzset, we need to check if struct tm has a tm_zone
++# (which is not required by ISO C or UNIX spec) and/or if we support
++# tzname[]
++AC_STRUCT_TIMEZONE
++
++# check tzset(3) exists and works like we expect it to
++AC_MSG_CHECKING(for working tzset())
++AC_CACHE_VAL(ac_cv_working_tzset, [
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <stdlib.h>
++#include <time.h>
++#include <string.h>
++
++#if HAVE_TZNAME
++extern char *tzname[];
++#endif
++
++int main()
++{
++ /* Note that we need to ensure that not only does tzset(3)
++ do 'something' with localtime, but it works as documented
++ in the library reference and as expected by the test suite.
++ This includes making sure that tzname is set properly if
++ tm->tm_zone does not exist since it is the alternative way
++ of getting timezone info.
++
++ Red Hat 6.2 doesn't understand the southern hemisphere
++ after New Year's Day.
++ */
++
++ time_t groundhogday = 1044144000; /* GMT-based */
++ time_t midyear = groundhogday + (365 * 24 * 3600 / 2);
++
++ putenv("TZ=UTC+0");
++ tzset();
++ if (localtime(&groundhogday)->tm_hour != 0)
++ exit(1);
++#if HAVE_TZNAME
++ /* For UTC, tzname[1] is sometimes "", sometimes " " */
++ if (strcmp(tzname[0], "UTC") ||
++ (tzname[1][0] != 0 && tzname[1][0] != ' '))
++ exit(1);
++#endif
++
++ putenv("TZ=EST+5EDT,M4.1.0,M10.5.0");
++ tzset();
++ if (localtime(&groundhogday)->tm_hour != 19)
++ exit(1);
++#if HAVE_TZNAME
++ if (strcmp(tzname[0], "EST") || strcmp(tzname[1], "EDT"))
++ exit(1);
++#endif
++
++ putenv("TZ=AEST-10AEDT-11,M10.5.0,M3.5.0");
++ tzset();
++ if (localtime(&groundhogday)->tm_hour != 11)
++ exit(1);
++#if HAVE_TZNAME
++ if (strcmp(tzname[0], "AEST") || strcmp(tzname[1], "AEDT"))
++ exit(1);
++#endif
++
++#if HAVE_STRUCT_TM_TM_ZONE
++ if (strcmp(localtime(&groundhogday)->tm_zone, "AEDT"))
++ exit(1);
++ if (strcmp(localtime(&midyear)->tm_zone, "AEST"))
++ exit(1);
++#endif
++
++ exit(0);
++}
++]])],
++[ac_cv_working_tzset=yes],
++[ac_cv_working_tzset=no],
++[ac_cv_working_tzset=no])])
++AC_MSG_RESULT($ac_cv_working_tzset)
++if test "$ac_cv_working_tzset" = yes
++then
++ AC_DEFINE(HAVE_WORKING_TZSET, 1,
++ [Define if tzset() actually switches the local timezone in a meaningful way.])
++fi
++
++# Look for subsecond timestamps in struct stat
++AC_MSG_CHECKING(for tv_nsec in struct stat)
++AC_CACHE_VAL(ac_cv_stat_tv_nsec,
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/stat.h>]], [[
++struct stat st;
++st.st_mtim.tv_nsec = 1;
++]])],
++[ac_cv_stat_tv_nsec=yes],
++[ac_cv_stat_tv_nsec=no]))
++AC_MSG_RESULT($ac_cv_stat_tv_nsec)
++if test "$ac_cv_stat_tv_nsec" = yes
++then
++ AC_DEFINE(HAVE_STAT_TV_NSEC, 1,
++ [Define if you have struct stat.st_mtim.tv_nsec])
++fi
++
++# Look for BSD style subsecond timestamps in struct stat
++AC_MSG_CHECKING(for tv_nsec2 in struct stat)
++AC_CACHE_VAL(ac_cv_stat_tv_nsec2,
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/stat.h>]], [[
++struct stat st;
++st.st_mtimespec.tv_nsec = 1;
++]])],
++[ac_cv_stat_tv_nsec2=yes],
++[ac_cv_stat_tv_nsec2=no]))
++AC_MSG_RESULT($ac_cv_stat_tv_nsec2)
++if test "$ac_cv_stat_tv_nsec2" = yes
++then
++ AC_DEFINE(HAVE_STAT_TV_NSEC2, 1,
++ [Define if you have struct stat.st_mtimensec])
++fi
++
++# On HP/UX 11.0, mvwdelch is a block with a return statement
++AC_MSG_CHECKING(whether mvwdelch is an expression)
++AC_CACHE_VAL(ac_cv_mvwdelch_is_expression,
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <curses.h>]], [[
++ int rtn;
++ rtn = mvwdelch(0,0,0);
++]])],
++[ac_cv_mvwdelch_is_expression=yes],
++[ac_cv_mvwdelch_is_expression=no]))
++AC_MSG_RESULT($ac_cv_mvwdelch_is_expression)
++
++if test "$ac_cv_mvwdelch_is_expression" = yes
++then
++ AC_DEFINE(MVWDELCH_IS_EXPRESSION, 1,
++ [Define if mvwdelch in curses.h is an expression.])
++fi
++
++AC_MSG_CHECKING(whether WINDOW has _flags)
++AC_CACHE_VAL(ac_cv_window_has_flags,
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <curses.h>]], [[
++ WINDOW *w;
++ w->_flags = 0;
++]])],
++[ac_cv_window_has_flags=yes],
++[ac_cv_window_has_flags=no]))
++AC_MSG_RESULT($ac_cv_window_has_flags)
++
++
++if test "$ac_cv_window_has_flags" = yes
++then
++ AC_DEFINE(WINDOW_HAS_FLAGS, 1,
++ [Define if WINDOW in curses.h offers a field _flags.])
++fi
++
++AC_MSG_CHECKING(for is_term_resized)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <curses.h>]], [[void *x=is_term_resized]])],
++ [AC_DEFINE(HAVE_CURSES_IS_TERM_RESIZED, 1, Define if you have the 'is_term_resized' function.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)]
++)
++
++AC_MSG_CHECKING(for resize_term)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <curses.h>]], [[void *x=resize_term]])],
++ [AC_DEFINE(HAVE_CURSES_RESIZE_TERM, 1, Define if you have the 'resize_term' function.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)]
++)
++
++AC_MSG_CHECKING(for resizeterm)
++AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <curses.h>]], [[void *x=resizeterm]])],
++ [AC_DEFINE(HAVE_CURSES_RESIZETERM, 1, Define if you have the 'resizeterm' function.)
++ AC_MSG_RESULT(yes)],
++ [AC_MSG_RESULT(no)]
++)
++
++AC_MSG_CHECKING(for /dev/ptmx)
++
++if test -r /dev/ptmx
++then
++ AC_MSG_RESULT(yes)
++ AC_DEFINE(HAVE_DEV_PTMX, 1,
++ [Define if we have /dev/ptmx.])
++else
++ AC_MSG_RESULT(no)
++fi
++
++AC_MSG_CHECKING(for /dev/ptc)
++
++if test -r /dev/ptc
++then
++ AC_MSG_RESULT(yes)
++ AC_DEFINE(HAVE_DEV_PTC, 1,
++ [Define if we have /dev/ptc.])
++else
++ AC_MSG_RESULT(no)
++fi
++
++if test "$have_long_long" = yes
++then
++ AC_MSG_CHECKING(for %lld and %llu printf() format support)
++ AC_CACHE_VAL(ac_cv_have_long_long_format,
++ AC_RUN_IFELSE([AC_LANG_SOURCE([[[
++ #include <stdio.h>
++ #include <stddef.h>
++ #include <string.h>
++
++ #ifdef HAVE_SYS_TYPES_H
++ #include <sys/types.h>
++ #endif
++
++ int main()
++ {
++ char buffer[256];
++
++ if (sprintf(buffer, "%lld", (long long)123) < 0)
++ return 1;
++ if (strcmp(buffer, "123"))
++ return 1;
++
++ if (sprintf(buffer, "%lld", (long long)-123) < 0)
++ return 1;
++ if (strcmp(buffer, "-123"))
++ return 1;
++
++ if (sprintf(buffer, "%llu", (unsigned long long)123) < 0)
++ return 1;
++ if (strcmp(buffer, "123"))
++ return 1;
++
++ return 0;
++ }
++ ]]])],
++ [ac_cv_have_long_long_format=yes],
++ [ac_cv_have_long_long_format=no],
++ [ac_cv_have_long_long_format=no])
++ )
++ AC_MSG_RESULT($ac_cv_have_long_long_format)
++fi
++
++if test "$ac_cv_have_long_long_format" = yes
++then
++ AC_DEFINE(PY_FORMAT_LONG_LONG, "ll",
++ [Define to printf format modifier for long long type])
++fi
++
++if test $ac_sys_system = Darwin
++then
++ LIBS="$LIBS -framework CoreFoundation"
++fi
++
++
++AC_CACHE_CHECK([for %zd printf() format support], ac_cv_have_size_t_format, [dnl
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++#include <stdio.h>
++#include <stddef.h>
++#include <string.h>
++
++#ifdef HAVE_SYS_TYPES_H
++#include <sys/types.h>
++#endif
++
++#ifdef HAVE_SSIZE_T
++typedef ssize_t Py_ssize_t;
++#elif SIZEOF_VOID_P == SIZEOF_LONG
++typedef long Py_ssize_t;
++#else
++typedef int Py_ssize_t;
++#endif
++
++int main()
++{
++ char buffer[256];
++
++ if(sprintf(buffer, "%zd", (size_t)123) < 0)
++ return 1;
++
++ if (strcmp(buffer, "123"))
++ return 1;
++
++ if (sprintf(buffer, "%zd", (Py_ssize_t)-123) < 0)
++ return 1;
++
++ if (strcmp(buffer, "-123"))
++ return 1;
++
++ return 0;
++}
++]])],
++[ac_cv_have_size_t_format=yes],
++[ac_cv_have_size_t_format=no],
++[ac_cv_have_size_t_format="cross -- assuming yes"
++])])
++if test "$ac_cv_have_size_t_format" != no ; then
++ AC_DEFINE(PY_FORMAT_SIZE_T, "z",
++ [Define to printf format modifier for Py_ssize_t])
++fi
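++# Illustration (an assumption about intended use, not checked here): the "z"
++# modifier lets a Py_ssize_t be formatted portably via string concatenation:
++#   printf("%" PY_FORMAT_SIZE_T "d", (Py_ssize_t)n);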
++
++AC_CHECK_TYPE(socklen_t,,
++ AC_DEFINE(socklen_t,int,
++ [Define to `int' if <sys/socket.h> does not define.]),[
++#ifdef HAVE_SYS_TYPES_H
++#include <sys/types.h>
++#endif
++#ifdef HAVE_SYS_SOCKET_H
++#include <sys/socket.h>
++#endif
++])
++
++case $ac_sys_system in
++AIX*)
++ AC_DEFINE(HAVE_BROKEN_PIPE_BUF, 1, [Define if the system reports an invalid PIPE_BUF value.]) ;;
++esac
++
++
++AC_SUBST(THREADHEADERS)
++
++for h in `(cd $srcdir;echo Python/thread_*.h)`
++do
++ THREADHEADERS="$THREADHEADERS \$(srcdir)/$h"
++done
++
++AC_SUBST(SRCDIRS)
++SRCDIRS="Parser Grammar Objects Python Modules Mac"
++AC_MSG_CHECKING(for build directories)
++for dir in $SRCDIRS; do
++ if test ! -d $dir; then
++ mkdir $dir
++ fi
++done
++AC_MSG_RESULT(done)
++
++# generate output files
++AC_CONFIG_FILES(Makefile.pre Modules/Setup.config Misc/python.pc)
++AC_CONFIG_FILES([Modules/ld_so_aix], [chmod +x Modules/ld_so_aix])
++AC_OUTPUT
++
++echo "creating Modules/Setup"
++if test ! -f Modules/Setup
++then
++ cp $srcdir/Modules/Setup.dist Modules/Setup
++fi
++
++echo "creating Modules/Setup.local"
++if test ! -f Modules/Setup.local
++then
++ echo "# Edit this file for local setup changes" >Modules/Setup.local
++fi
++
++echo "creating Makefile"
++$SHELL $srcdir/Modules/makesetup -c $srcdir/Modules/config.c.in \
++ -s Modules Modules/Setup.config \
++ Modules/Setup.local Modules/Setup
++
++case $ac_sys_system in
++BeOS)
++ AC_MSG_WARN([
++
++ Support for BeOS is deprecated as of Python 2.6.
++ See PEP 11 for the gory details.
++ ])
++ ;;
++*) ;;
++esac
++
++mv config.c Modules
+diff -r 70274d53c1dd configure.in
+--- a/configure.in
++++ /dev/null
+@@ -1,4355 +0,0 @@
+-dnl ***********************************************
+-dnl * Please run autoreconf to test your changes! *
+-dnl ***********************************************
+-
+-# Set VERSION so we only need to edit in one place (i.e., here)
+-m4_define(PYTHON_VERSION, 2.7)
+-
+-AC_PREREQ(2.65)
+-
+-AC_REVISION($Revision$)
+-AC_INIT(python, PYTHON_VERSION, http://bugs.python.org/)
+-AC_CONFIG_SRCDIR([Include/object.h])
+-AC_CONFIG_HEADER(pyconfig.h)
+-
+-dnl Ensure that if prefix is specified, it does not end in a slash. If
+-dnl it does, we get path names containing '//' which is both ugly and
+-dnl can cause trouble.
+-
+-dnl Last slash shouldn't be stripped if prefix=/
+-if test "$prefix" != "/"; then
+- prefix=`echo "$prefix" | sed -e 's/\/$//g'`
+-fi
+-
+-dnl This is for stuff that absolutely must end up in pyconfig.h.
+-dnl Please use pyport.h instead, if possible.
+-AH_TOP([
+-#ifndef Py_PYCONFIG_H
+-#define Py_PYCONFIG_H
+-])
+-AH_BOTTOM([
+-/* Define the macros needed if on a UnixWare 7.x system. */
+-#if defined(__USLC__) && defined(__SCO_VERSION__)
+-#define STRICT_SYSV_CURSES /* Don't use ncurses extensions */
+-#endif
+-
+-#endif /*Py_PYCONFIG_H*/
+-])
+-
+-# We don't use PACKAGE_ variables, and they cause conflicts
+-# with other autoconf-based packages that include Python.h
+-grep -v 'define PACKAGE_' <confdefs.h >confdefs.h.new
+-rm confdefs.h
+-mv confdefs.h.new confdefs.h
+-
+-AC_SUBST(VERSION)
+-VERSION=PYTHON_VERSION
+-
+-AC_SUBST(SOVERSION)
+-SOVERSION=1.0
+-
+-# The later defininition of _XOPEN_SOURCE disables certain features
+-# on Linux, so we need _GNU_SOURCE to re-enable them (makedev, tm_zone).
+-AC_DEFINE(_GNU_SOURCE, 1, [Define on Linux to activate all library features])
+-
+-# The later defininition of _XOPEN_SOURCE and _POSIX_C_SOURCE disables
+-# certain features on NetBSD, so we need _NETBSD_SOURCE to re-enable
+-# them.
+-AC_DEFINE(_NETBSD_SOURCE, 1, [Define on NetBSD to activate all library features])
+-
+-# The later defininition of _XOPEN_SOURCE and _POSIX_C_SOURCE disables
+-# certain features on FreeBSD, so we need __BSD_VISIBLE to re-enable
+-# them.
+-AC_DEFINE(__BSD_VISIBLE, 1, [Define on FreeBSD to activate all library features])
+-
+-# The later defininition of _XOPEN_SOURCE and _POSIX_C_SOURCE disables
+-# u_int on Irix 5.3. Defining _BSD_TYPES brings it back.
+-AC_DEFINE(_BSD_TYPES, 1, [Define on Irix to enable u_int])
+-
+-# The later defininition of _XOPEN_SOURCE and _POSIX_C_SOURCE disables
+-# certain features on Mac OS X, so we need _DARWIN_C_SOURCE to re-enable
+-# them.
+-AC_DEFINE(_DARWIN_C_SOURCE, 1, [Define on Darwin to activate all library features])
+-
+-
+-define_xopen_source=yes
+-
+-# Arguments passed to configure.
+-AC_SUBST(CONFIG_ARGS)
+-CONFIG_ARGS="$ac_configure_args"
+-
+-AC_MSG_CHECKING([for --enable-universalsdk])
+-AC_ARG_ENABLE(universalsdk,
+- AS_HELP_STRING([--enable-universalsdk@<:@=SDKDIR@:>@], [Build against Mac OS X 10.4u SDK (ppc/i386)]),
+-[
+- case $enableval in
+- yes)
+- enableval=/Developer/SDKs/MacOSX10.4u.sdk
+- if test ! -d "${enableval}"
+- then
+- enableval=/
+- fi
+- ;;
+- esac
+- case $enableval in
+- no)
+- UNIVERSALSDK=
+- enable_universalsdk=
+- ;;
+- *)
+- UNIVERSALSDK=$enableval
+- if test ! -d "${UNIVERSALSDK}"
+- then
+- AC_MSG_ERROR([--enable-universalsdk specifies non-existing SDK: ${UNIVERSALSDK}])
+- fi
+- ;;
+- esac
+-
+-],[
+- UNIVERSALSDK=
+- enable_universalsdk=
+-])
+-if test -n "${UNIVERSALSDK}"
+-then
+- AC_MSG_RESULT(${UNIVERSALSDK})
+-else
+- AC_MSG_RESULT(no)
+-fi
+-AC_SUBST(UNIVERSALSDK)
+-
+-AC_SUBST(ARCH_RUN_32BIT)
+-
+-UNIVERSAL_ARCHS="32-bit"
+-AC_SUBST(LIPO_32BIT_FLAGS)
+-AC_MSG_CHECKING(for --with-universal-archs)
+-AC_ARG_WITH(universal-archs,
+- AS_HELP_STRING([--with-universal-archs=ARCH], [select architectures for universal build ("32-bit", "64-bit", "3-way", "intel" or "all")]),
+-[
+- AC_MSG_RESULT($withval)
+- UNIVERSAL_ARCHS="$withval"
+- if test "${enable_universalsdk}" ; then
+- :
+- else
+- AC_MSG_ERROR([--with-universal-archs without --enable-universalsdk. See Mac/README])
+- fi
+-],
+-[
+- AC_MSG_RESULT(32-bit)
+-])
+-
+-
+-
+-AC_ARG_WITH(framework-name,
+- AS_HELP_STRING([--with-framework-name=FRAMEWORK],
+- [specify an alternate name of the framework built with --enable-framework]),
+-[
+- if test "${enable_framework}"; then
+- :
+- else
+- AC_MSG_ERROR([--with-framework-name without --enable-framework. See Mac/README])
+- fi
+- PYTHONFRAMEWORK=${withval}
+- PYTHONFRAMEWORKDIR=${withval}.framework
+- PYTHONFRAMEWORKIDENTIFIER=org.python.`echo $withval | tr '[A-Z]' '[a-z]'`
+- ],[
+- PYTHONFRAMEWORK=Python
+- PYTHONFRAMEWORKDIR=Python.framework
+- PYTHONFRAMEWORKIDENTIFIER=org.python.python
+-])
+-dnl quadrigraphs "@<:@" and "@:>@" produce "[" and "]" in the output
+-AC_ARG_ENABLE(framework,
+- AS_HELP_STRING([--enable-framework@<:@=INSTALLDIR@:>@], [Build (MacOSX|Darwin) framework]),
+-[
+- case $enableval in
+- yes)
+- enableval=/Library/Frameworks
+- esac
+- case $enableval in
+- no)
+- PYTHONFRAMEWORK=
+- PYTHONFRAMEWORKDIR=no-framework
+- PYTHONFRAMEWORKPREFIX=
+- PYTHONFRAMEWORKINSTALLDIR=
+- FRAMEWORKINSTALLFIRST=
+- FRAMEWORKINSTALLLAST=
+- FRAMEWORKALTINSTALLFIRST=
+- FRAMEWORKALTINSTALLLAST=
+- if test "x${prefix}" = "xNONE"; then
+- FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}"
+- else
+- FRAMEWORKUNIXTOOLSPREFIX="${prefix}"
+- fi
+- enable_framework=
+- ;;
+- *)
+- PYTHONFRAMEWORKPREFIX="${enableval}"
+- PYTHONFRAMEWORKINSTALLDIR=$PYTHONFRAMEWORKPREFIX/$PYTHONFRAMEWORKDIR
+- FRAMEWORKINSTALLFIRST="frameworkinstallstructure"
+- FRAMEWORKALTINSTALLFIRST="frameworkinstallstructure bininstall maninstall"
+- FRAMEWORKINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkinstallunixtools"
+- FRAMEWORKALTINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkaltinstallunixtools"
+- FRAMEWORKINSTALLAPPSPREFIX="/Applications"
+-
+- if test "x${prefix}" = "xNONE" ; then
+- FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}"
+-
+- else
+- FRAMEWORKUNIXTOOLSPREFIX="${prefix}"
+- fi
+-
+- case "${enableval}" in
+- /System*)
+- FRAMEWORKINSTALLAPPSPREFIX="/Applications"
+- if test "${prefix}" = "NONE" ; then
+- # See below
+- FRAMEWORKUNIXTOOLSPREFIX="/usr"
+- fi
+- ;;
+-
+- /Library*)
+- FRAMEWORKINSTALLAPPSPREFIX="/Applications"
+- ;;
+-
+- */Library/Frameworks)
+- MDIR="`dirname "${enableval}"`"
+- MDIR="`dirname "${MDIR}"`"
+- FRAMEWORKINSTALLAPPSPREFIX="${MDIR}/Applications"
+-
+- if test "${prefix}" = "NONE"; then
+- # User hasn't specified the
+- # --prefix option, but wants to install
+- # the framework in a non-default location,
+- # ensure that the compatibility links get
+- # installed relative to that prefix as well
+- # instead of in /usr/local.
+- FRAMEWORKUNIXTOOLSPREFIX="${MDIR}"
+- fi
+- ;;
+-
+- *)
+- FRAMEWORKINSTALLAPPSPREFIX="/Applications"
+- ;;
+- esac
+-
+- prefix=$PYTHONFRAMEWORKINSTALLDIR/Versions/$VERSION
+-
+- # Add files for Mac specific code to the list of output
+- # files:
+- AC_CONFIG_FILES(Mac/Makefile)
+- AC_CONFIG_FILES(Mac/PythonLauncher/Makefile)
+- AC_CONFIG_FILES(Mac/IDLE/Makefile)
+- AC_CONFIG_FILES(Mac/Resources/framework/Info.plist)
+- AC_CONFIG_FILES(Mac/Resources/app/Info.plist)
+- esac
+- ],[
+- PYTHONFRAMEWORK=
+- PYTHONFRAMEWORKDIR=no-framework
+- PYTHONFRAMEWORKPREFIX=
+- PYTHONFRAMEWORKINSTALLDIR=
+- FRAMEWORKINSTALLFIRST=
+- FRAMEWORKINSTALLLAST=
+- FRAMEWORKALTINSTALLFIRST=
+- FRAMEWORKALTINSTALLLAST=
+- if test "x${prefix}" = "xNONE" ; then
+- FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}"
+- else
+- FRAMEWORKUNIXTOOLSPREFIX="${prefix}"
+- fi
+- enable_framework=
+-
+-])
+-AC_SUBST(PYTHONFRAMEWORK)
+-AC_SUBST(PYTHONFRAMEWORKIDENTIFIER)
+-AC_SUBST(PYTHONFRAMEWORKDIR)
+-AC_SUBST(PYTHONFRAMEWORKPREFIX)
+-AC_SUBST(PYTHONFRAMEWORKINSTALLDIR)
+-AC_SUBST(FRAMEWORKINSTALLFIRST)
+-AC_SUBST(FRAMEWORKINSTALLLAST)
+-AC_SUBST(FRAMEWORKALTINSTALLFIRST)
+-AC_SUBST(FRAMEWORKALTINSTALLLAST)
+-AC_SUBST(FRAMEWORKUNIXTOOLSPREFIX)
+-AC_SUBST(FRAMEWORKINSTALLAPPSPREFIX)
+-
+-##AC_ARG_WITH(dyld,
+-## AS_HELP_STRING([--with-dyld],
+-## [Use (OpenStep|Rhapsody) dynamic linker]))
+-##
+-# Set name for machine-dependent library files
+-AC_SUBST(MACHDEP)
+-AC_MSG_CHECKING(MACHDEP)
+-if test -z "$MACHDEP"
+-then
+- ac_sys_system=`uname -s`
+- if test "$ac_sys_system" = "AIX" \
+- -o "$ac_sys_system" = "UnixWare" -o "$ac_sys_system" = "OpenUNIX"; then
+- ac_sys_release=`uname -v`
+- else
+- ac_sys_release=`uname -r`
+- fi
+- ac_md_system=`echo $ac_sys_system |
+- tr -d '[/ ]' | tr '[[A-Z]]' '[[a-z]]'`
+- ac_md_release=`echo $ac_sys_release |
+- tr -d '[/ ]' | sed 's/^[[A-Z]]\.//' | sed 's/\..*//'`
+- MACHDEP="$ac_md_system$ac_md_release"
+-
+- case $MACHDEP in
+- linux*) MACHDEP="linux2";;
+- cygwin*) MACHDEP="cygwin";;
+- darwin*) MACHDEP="darwin";;
+- atheos*) MACHDEP="atheos";;
+- irix646) MACHDEP="irix6";;
+- '') MACHDEP="unknown";;
+- esac
+-fi
+-
+-# Some systems cannot stand _XOPEN_SOURCE being defined at all; they
+-# disable features if it is defined, without any means to access these
+-# features as extensions. For these systems, we skip the definition of
+-# _XOPEN_SOURCE. Before adding a system to the list to gain access to
+-# some feature, make sure there is no alternative way to access this
+-# feature. Also, when using wildcards, make sure you have verified the
+-# need for not defining _XOPEN_SOURCE on all systems matching the
+-# wildcard, and that the wildcard does not include future systems
+-# (which may remove their limitations).
+-dnl quadrigraphs "@<:@" and "@:>@" produce "[" and "]" in the output
+-case $ac_sys_system/$ac_sys_release in
+- # On OpenBSD, select(2) is not available if _XOPEN_SOURCE is defined,
+- # even though select is a POSIX function. Reported by J. Ribbens.
+- # Reconfirmed for OpenBSD 3.3 by Zachary Hamm, for 3.4 by Jason Ish.
+- # In addition, Stefan Krah confirms that issue #1244610 exists through
+- # OpenBSD 4.6, but is fixed in 4.7.
+- OpenBSD/2.* | OpenBSD/3.* | OpenBSD/4.@<:@0123456@:>@)
+- define_xopen_source=no
+- # OpenBSD undoes our definition of __BSD_VISIBLE if _XOPEN_SOURCE is
+- # also defined. This can be overridden by defining _BSD_SOURCE
+- # As this has a different meaning on Linux, only define it on OpenBSD
+- AC_DEFINE(_BSD_SOURCE, 1, [Define on OpenBSD to activate all library features])
+- ;;
+- OpenBSD/*)
+- # OpenBSD undoes our definition of __BSD_VISIBLE if _XOPEN_SOURCE is
+- # also defined. This can be overridden by defining _BSD_SOURCE
+- # As this has a different meaning on Linux, only define it on OpenBSD
+- AC_DEFINE(_BSD_SOURCE, 1, [Define on OpenBSD to activate all library features])
+- ;;
+- # Defining _XOPEN_SOURCE on NetBSD version prior to the introduction of
+- # _NETBSD_SOURCE disables certain features (eg. setgroups). Reported by
+- # Marc Recht
+- NetBSD/1.5 | NetBSD/1.5.* | NetBSD/1.6 | NetBSD/1.6.* | NetBSD/1.6@<:@A-S@:>@)
+- define_xopen_source=no;;
+- # From the perspective of Solaris, _XOPEN_SOURCE is not so much a
+- # request to enable features supported by the standard as a request
+- # to disable features not supported by the standard. The best way
+- # for Python to use Solaris is simply to leave _XOPEN_SOURCE out
+- # entirely and define __EXTENSIONS__ instead.
+- SunOS/*)
+- define_xopen_source=no;;
+- # On UnixWare 7, u_long is never defined with _XOPEN_SOURCE,
+- # but used in /usr/include/netinet/tcp.h. Reported by Tim Rice.
+- # Reconfirmed for 7.1.4 by Martin v. Loewis.
+- OpenUNIX/8.0.0| UnixWare/7.1.@<:@0-4@:>@)
+- define_xopen_source=no;;
+- # On OpenServer 5, u_short is never defined with _XOPEN_SOURCE,
+- # but used in struct sockaddr.sa_family. Reported by Tim Rice.
+- SCO_SV/3.2)
+- define_xopen_source=no;;
+- # On FreeBSD 4, the math functions C89 does not cover are never defined
+- # with _XOPEN_SOURCE and __BSD_VISIBLE does not re-enable them.
+- FreeBSD/4.*)
+- define_xopen_source=no;;
+- # On MacOS X 10.2, a bug in ncurses.h means that it craps out if
+- # _XOPEN_EXTENDED_SOURCE is defined. Apparently, this is fixed in 10.3, which
+- # identifies itself as Darwin/7.*
+- # On Mac OS X 10.4, defining _POSIX_C_SOURCE or _XOPEN_SOURCE
+- # disables platform specific features beyond repair.
+- # On Mac OS X 10.3, defining _POSIX_C_SOURCE or _XOPEN_SOURCE
+- # has no effect, don't bother defining them
+- Darwin/@<:@6789@:>@.*)
+- define_xopen_source=no;;
+- Darwin/1@<:@0-9@:>@.*)
+- define_xopen_source=no;;
+- # On AIX 4 and 5.1, mbstate_t is defined only when _XOPEN_SOURCE == 500 but
+- # used in wcsnrtombs() and mbsnrtowcs() even if _XOPEN_SOURCE is not defined
+- # or has another value. By not (re)defining it, the defaults come in place.
+- AIX/4)
+- define_xopen_source=no;;
+- AIX/5)
+- if test `uname -r` -eq 1; then
+- define_xopen_source=no
+- fi
+- ;;
+- # On QNX 6.3.2, defining _XOPEN_SOURCE prevents netdb.h from
+- # defining NI_NUMERICHOST.
+- QNX/6.3.2)
+- define_xopen_source=no
+- ;;
+-
+-esac
+-
+-if test $define_xopen_source = yes
+-then
+- AC_DEFINE(_XOPEN_SOURCE, 600,
+- Define to the level of X/Open that your system supports)
+-
+- # On Tru64 Unix 4.0F, defining _XOPEN_SOURCE also requires
+- # definition of _XOPEN_SOURCE_EXTENDED and _POSIX_C_SOURCE, or else
+- # several APIs are not declared. Since this is also needed in some
+- # cases for HP-UX, we define it globally.
+- AC_DEFINE(_XOPEN_SOURCE_EXTENDED, 1,
+- Define to activate Unix95-and-earlier features)
+-
+- AC_DEFINE(_POSIX_C_SOURCE, 200112L, Define to activate features from IEEE Stds 1003.1-2001)
+-
+-fi
+-
+-#
+-# SGI compilers allow the specification of the both the ABI and the
+-# ISA on the command line. Depending on the values of these switches,
+-# different and often incompatible code will be generated.
+-#
+-# The SGI_ABI variable can be used to modify the CC and LDFLAGS and
+-# thus supply support for various ABI/ISA combinations. The MACHDEP
+-# variable is also adjusted.
+-#
+-AC_SUBST(SGI_ABI)
+-if test ! -z "$SGI_ABI"
+-then
+- CC="cc $SGI_ABI"
+- LDFLAGS="$SGI_ABI $LDFLAGS"
+- MACHDEP=`echo "${MACHDEP}${SGI_ABI}" | sed 's/ *//g'`
+-fi
+-AC_MSG_RESULT($MACHDEP)
+-
+-# And add extra plat-mac for darwin
+-AC_SUBST(EXTRAPLATDIR)
+-AC_SUBST(EXTRAMACHDEPPATH)
+-AC_MSG_CHECKING(EXTRAPLATDIR)
+-if test -z "$EXTRAPLATDIR"
+-then
+- case $MACHDEP in
+- darwin)
+- EXTRAPLATDIR="\$(PLATMACDIRS)"
+- EXTRAMACHDEPPATH="\$(PLATMACPATH)"
+- ;;
+- *)
+- EXTRAPLATDIR=""
+- EXTRAMACHDEPPATH=""
+- ;;
+- esac
+-fi
+-AC_MSG_RESULT($EXTRAPLATDIR)
+-
+-# Record the configure-time value of MACOSX_DEPLOYMENT_TARGET,
+-# it may influence the way we can build extensions, so distutils
+-# needs to check it
+-AC_SUBST(CONFIGURE_MACOSX_DEPLOYMENT_TARGET)
+-AC_SUBST(EXPORT_MACOSX_DEPLOYMENT_TARGET)
+-CONFIGURE_MACOSX_DEPLOYMENT_TARGET=
+-EXPORT_MACOSX_DEPLOYMENT_TARGET='#'
+-
+-AC_MSG_CHECKING(machine type as reported by uname -m)
+-ac_sys_machine=`uname -m`
+-AC_MSG_RESULT($ac_sys_machine)
+-
+-# checks for alternative programs
+-
+-# compiler flags are generated in two sets, BASECFLAGS and OPT. OPT is just
+-# for debug/optimization stuff. BASECFLAGS is for flags that are required
+-# just to get things to compile and link. Users are free to override OPT
+-# when running configure or make. The build should not break if they do.
+-# BASECFLAGS should generally not be messed with, however.
+-
+-# XXX shouldn't some/most/all of this code be merged with the stuff later
+-# on that fiddles with OPT and BASECFLAGS?
+-AC_MSG_CHECKING(for --without-gcc)
+-AC_ARG_WITH(gcc,
+- AS_HELP_STRING([--without-gcc], [never use gcc]),
+-[
+- case $withval in
+- no) CC=${CC:-cc}
+- without_gcc=yes;;
+- yes) CC=gcc
+- without_gcc=no;;
+- *) CC=$withval
+- without_gcc=$withval;;
+- esac], [
+- case $ac_sys_system in
+- AIX*) CC=${CC:-xlc_r}
+- without_gcc=;;
+- BeOS*)
+- case $BE_HOST_CPU in
+- ppc)
+- CC=mwcc
+- without_gcc=yes
+- BASECFLAGS="$BASECFLAGS -export pragma"
+- OPT="$OPT -O"
+- LDFLAGS="$LDFLAGS -nodup"
+- ;;
+- x86)
+- CC=gcc
+- without_gcc=no
+- OPT="$OPT -O"
+- ;;
+- *)
+- AC_MSG_ERROR([Unknown BeOS platform "$BE_HOST_CPU"])
+- ;;
+- esac
+- AR="\$(srcdir)/Modules/ar_beos"
+- RANLIB=:
+- ;;
+- *) without_gcc=no;;
+- esac])
+-AC_MSG_RESULT($without_gcc)
+-
+-# If the user switches compilers, we can't believe the cache
+-if test ! -z "$ac_cv_prog_CC" -a ! -z "$CC" -a "$CC" != "$ac_cv_prog_CC"
+-then
+- AC_MSG_ERROR([cached CC is different -- throw away $cache_file
+-(it is also a good idea to do 'make clean' before compiling)])
+-fi
+-
+-# If the user set CFLAGS, use this instead of the automatically
+-# determined setting
+-preset_cflags="$CFLAGS"
+-AC_PROG_CC
+-if test ! -z "$preset_cflags"
+-then
+- CFLAGS=$preset_cflags
+-fi
+-
+-AC_SUBST(CXX)
+-AC_SUBST(MAINCC)
+-AC_MSG_CHECKING(for --with-cxx-main=<compiler>)
+-AC_ARG_WITH(cxx_main,
+- AS_HELP_STRING([--with-cxx-main=<compiler>],
+- [compile main() and link python executable with C++ compiler]),
+-[
+-
+- case $withval in
+- no) with_cxx_main=no
+- MAINCC='$(CC)';;
+- yes) with_cxx_main=yes
+- MAINCC='$(CXX)';;
+- *) with_cxx_main=yes
+- MAINCC=$withval
+- if test -z "$CXX"
+- then
+- CXX=$withval
+- fi;;
+- esac], [
+- with_cxx_main=no
+- MAINCC='$(CC)'
+-])
+-AC_MSG_RESULT($with_cxx_main)
+-
+-preset_cxx="$CXX"
+-if test -z "$CXX"
+-then
+- case "$CC" in
+- gcc) AC_PATH_PROG(CXX, [g++], [g++], [notfound]) ;;
+- cc) AC_PATH_PROG(CXX, [c++], [c++], [notfound]) ;;
+- esac
+- if test "$CXX" = "notfound"
+- then
+- CXX=""
+- fi
+-fi
+-if test -z "$CXX"
+-then
+- AC_CHECK_PROGS(CXX, $CCC c++ g++ gcc CC cxx cc++ cl, notfound)
+- if test "$CXX" = "notfound"
+- then
+- CXX=""
+- fi
+-fi
+-if test "$preset_cxx" != "$CXX"
+-then
+- AC_MSG_WARN([
+-
+- By default, distutils will build C++ extension modules with "$CXX".
+- If this is not intended, then set CXX on the configure command line.
+- ])
+-fi
+-
+-
+-# checks for UNIX variants that set C preprocessor variables
+-AC_USE_SYSTEM_EXTENSIONS
+-
+-# Check for unsupported systems
+-case $ac_sys_system/$ac_sys_release in
+-atheos*|Linux*/1*)
+- echo This system \($ac_sys_system/$ac_sys_release\) is no longer supported.
+- echo See README for details.
+- exit 1;;
+-esac
+-
+-AC_EXEEXT
+-AC_MSG_CHECKING(for --with-suffix)
+-AC_ARG_WITH(suffix,
+- AS_HELP_STRING([--with-suffix=.exe], [set executable suffix]),
+-[
+- case $withval in
+- no) EXEEXT=;;
+- yes) EXEEXT=.exe;;
+- *) EXEEXT=$withval;;
+- esac])
+-AC_MSG_RESULT($EXEEXT)
+-
+-# Test whether we're running on a non-case-sensitive system, in which
+-# case we give a warning if no ext is given
+-AC_SUBST(BUILDEXEEXT)
+-AC_MSG_CHECKING(for case-insensitive build directory)
+-if test ! -d CaseSensitiveTestDir; then
+-mkdir CaseSensitiveTestDir
+-fi
+-
+-if test -d casesensitivetestdir
+-then
+- AC_MSG_RESULT(yes)
+- BUILDEXEEXT=.exe
+-else
+- AC_MSG_RESULT(no)
+- BUILDEXEEXT=$EXEEXT
+-fi
+-rmdir CaseSensitiveTestDir
+-
+-case $MACHDEP in
+-bsdos*)
+- case $CC in
+- gcc) CC="$CC -D_HAVE_BSDI";;
+- esac;;
+-esac
+-
+-case $ac_sys_system in
+-hp*|HP*)
+- case $CC in
+- cc|*/cc) CC="$CC -Ae";;
+- esac;;
+-SunOS*)
+- # Some functions have a prototype only with that define, e.g. confstr
+- AC_DEFINE(__EXTENSIONS__, 1, [Defined on Solaris to see additional function prototypes.])
+- ;;
+-esac
+-
+-
+-AC_SUBST(LIBRARY)
+-AC_MSG_CHECKING(LIBRARY)
+-if test -z "$LIBRARY"
+-then
+- LIBRARY='libpython$(VERSION).a'
+-fi
+-AC_MSG_RESULT($LIBRARY)
+-
+-# LDLIBRARY is the name of the library to link against (as opposed to the
+-# name of the library into which to insert object files). BLDLIBRARY is also
+-# the library to link against, usually. On Mac OS X frameworks, BLDLIBRARY
+-# is blank as the main program is not linked directly against LDLIBRARY.
+-# LDLIBRARYDIR is the path to LDLIBRARY, which is made in a subdirectory. On
+-# systems without shared libraries, LDLIBRARY is the same as LIBRARY
+-# (defined in the Makefiles). On Cygwin LDLIBRARY is the import library,
+-# DLLLIBRARY is the shared (i.e., DLL) library.
+-#
+-# RUNSHARED is used to run shared python without installed libraries
+-#
+-# INSTSONAME is the name of the shared library that will be used to install
+-# on the system - some systems like version suffix, others don't
+-AC_SUBST(LDLIBRARY)
+-AC_SUBST(DLLLIBRARY)
+-AC_SUBST(BLDLIBRARY)
+-AC_SUBST(LDLIBRARYDIR)
+-AC_SUBST(INSTSONAME)
+-AC_SUBST(RUNSHARED)
+-LDLIBRARY="$LIBRARY"
+-BLDLIBRARY='$(LDLIBRARY)'
+-INSTSONAME='$(LDLIBRARY)'
+-DLLLIBRARY=''
+-LDLIBRARYDIR=''
+-RUNSHARED=''
+-
+-# LINKCC is the command that links the python executable -- default is $(CC).
+-# If CXX is set, and if it is needed to link a main function that was
+-# compiled with CXX, LINKCC is CXX instead. Always using CXX is undesirable:
+-# python might then depend on the C++ runtime
+-# This is altered for AIX in order to build the export list before
+-# linking.
+-AC_SUBST(LINKCC)
+-AC_MSG_CHECKING(LINKCC)
+-if test -z "$LINKCC"
+-then
+- LINKCC='$(PURIFY) $(MAINCC)'
+- case $ac_sys_system in
+- AIX*)
+- exp_extra="\"\""
+- if test $ac_sys_release -ge 5 -o \
+- $ac_sys_release -eq 4 -a `uname -r` -ge 2 ; then
+- exp_extra="."
+- fi
+- LINKCC="\$(srcdir)/Modules/makexp_aix Modules/python.exp $exp_extra \$(LIBRARY); $LINKCC";;
+- QNX*)
+- # qcc must be used because the other compilers do not
+- # support -N.
+- LINKCC=qcc;;
+- esac
+-fi
+-AC_MSG_RESULT($LINKCC)
+-
+-# GNULD is set to "yes" if the GNU linker is used. If this goes wrong
+-# make sure we default to having it set to "no": this is used by
+-# distutils.unixccompiler to know if it should add --enable-new-dtags
+-# to linker command lines, and failing to detect GNU ld simply results
+-# in the same behaviour as before.
+-AC_SUBST(GNULD)
+-AC_MSG_CHECKING(for GNU ld)
+-ac_prog=ld
+-if test "$GCC" = yes; then
+- ac_prog=`$CC -print-prog-name=ld`
+-fi
+-case `"$ac_prog" -V 2>&1 < /dev/null` in
+- *GNU*)
+- GNULD=yes;;
+- *)
+- GNULD=no;;
+-esac
+-AC_MSG_RESULT($GNULD)
+-
+-AC_MSG_CHECKING(for --enable-shared)
+-AC_ARG_ENABLE(shared,
+- AS_HELP_STRING([--enable-shared], [disable/enable building shared python library]))
+-
+-if test -z "$enable_shared"
+-then
+- case $ac_sys_system in
+- CYGWIN* | atheos*)
+- enable_shared="yes";;
+- *)
+- enable_shared="no";;
+- esac
+-fi
+-AC_MSG_RESULT($enable_shared)
+-
+-AC_MSG_CHECKING(for --enable-profiling)
+-AC_ARG_ENABLE(profiling,
+- AS_HELP_STRING([--enable-profiling], [enable C-level code profiling]),
+-[ac_save_cc="$CC"
+- CC="$CC -pg"
+- AC_RUN_IFELSE([AC_LANG_SOURCE([[int main() { return 0; }]])],
+- [ac_enable_profiling="yes"],
+- [ac_enable_profiling="no"],
+- [ac_enable_profiling="no"])
+- CC="$ac_save_cc"])
+-AC_MSG_RESULT($ac_enable_profiling)
+-
+-case "$ac_enable_profiling" in
+- "yes")
+- BASECFLAGS="-pg $BASECFLAGS"
+- LDFLAGS="-pg $LDFLAGS"
+- ;;
+-esac
+-
+-AC_MSG_CHECKING(LDLIBRARY)
+-
+-# MacOSX framework builds need more magic. LDLIBRARY is the dynamic
+-# library that we build, but we do not want to link against it (we
+-# will find it with a -framework option). For this reason there is an
+-# extra variable BLDLIBRARY against which Python and the extension
+-# modules are linked. This is normally the same as
+-# LDLIBRARY, but empty for MacOSX framework builds.
+-if test "$enable_framework"
+-then
+- LDLIBRARY='$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
+- RUNSHARED=DYLD_FRAMEWORK_PATH="`pwd`:$DYLD_FRAMEWORK_PATH"
+- BLDLIBRARY=''
+-else
+- BLDLIBRARY='$(LDLIBRARY)'
+-fi
+-
+-# Other platforms follow
+-if test $enable_shared = "yes"; then
+- AC_DEFINE(Py_ENABLE_SHARED, 1, [Defined if Python is built as a shared library.])
+- case $ac_sys_system in
+- BeOS*)
+- LDLIBRARY='libpython$(VERSION).so'
+- ;;
+- CYGWIN*)
+- LDLIBRARY='libpython$(VERSION).dll.a'
+- DLLLIBRARY='libpython$(VERSION).dll'
+- ;;
+- SunOS*)
+- LDLIBRARY='libpython$(VERSION).so'
+- BLDLIBRARY='-Wl,-R,$(LIBDIR) -L. -lpython$(VERSION)'
+- RUNSHARED=LD_LIBRARY_PATH=`pwd`:${LD_LIBRARY_PATH}
+- INSTSONAME="$LDLIBRARY".$SOVERSION
+- ;;
+- Linux*|GNU*|NetBSD*|FreeBSD*|DragonFly*|OpenBSD*)
+- LDLIBRARY='libpython$(VERSION).so'
+- BLDLIBRARY='-L. -lpython$(VERSION)'
+- RUNSHARED=LD_LIBRARY_PATH=`pwd`:${LD_LIBRARY_PATH}
+- case $ac_sys_system in
+- FreeBSD*)
+- SOVERSION=`echo $SOVERSION|cut -d "." -f 1`
+- ;;
+- esac
+- INSTSONAME="$LDLIBRARY".$SOVERSION
+- ;;
+- hp*|HP*)
+- case `uname -m` in
+- ia64)
+- LDLIBRARY='libpython$(VERSION).so'
+- ;;
+- *)
+- LDLIBRARY='libpython$(VERSION).sl'
+- ;;
+- esac
+- BLDLIBRARY='-Wl,+b,$(LIBDIR) -L. -lpython$(VERSION)'
+- RUNSHARED=SHLIB_PATH=`pwd`:${SHLIB_PATH}
+- ;;
+- OSF*)
+- LDLIBRARY='libpython$(VERSION).so'
+- BLDLIBRARY='-rpath $(LIBDIR) -L. -lpython$(VERSION)'
+- RUNSHARED=LD_LIBRARY_PATH=`pwd`:${LD_LIBRARY_PATH}
+- ;;
+- atheos*)
+- LDLIBRARY='libpython$(VERSION).so'
+- BLDLIBRARY='-L. -lpython$(VERSION)'
+- RUNSHARED=DLL_PATH=`pwd`:${DLL_PATH:-/atheos/sys/libs:/atheos/autolnk/lib}
+- ;;
+- Darwin*)
+- LDLIBRARY='libpython$(VERSION).dylib'
+- BLDLIBRARY='-L. -lpython$(VERSION)'
+- RUNSHARED='DYLD_LIBRARY_PATH=`pwd`:${DYLD_LIBRARY_PATH}'
+- ;;
+- AIX*)
+- LDLIBRARY='libpython$(VERSION).so'
+- RUNSHARED=LIBPATH=`pwd`:${LIBPATH}
+- ;;
+-
+- esac
+-else # shared is disabled
+- case $ac_sys_system in
+- CYGWIN*)
+- BLDLIBRARY='$(LIBRARY)'
+- LDLIBRARY='libpython$(VERSION).dll.a'
+- ;;
+- esac
+-fi
+-
+-AC_MSG_RESULT($LDLIBRARY)
+-
+-AC_PROG_RANLIB
+-AC_SUBST(AR)
+-AC_CHECK_PROGS(AR, ar aal, ar)
+-
+-# tweak ARFLAGS only if the user didn't set it on the command line
+-AC_SUBST(ARFLAGS)
+-if test -z "$ARFLAGS"
+-then
+- ARFLAGS="rc"
+-fi
+-
+-AC_SUBST(SVNVERSION)
+-AC_CHECK_PROG(SVNVERSION, svnversion, found, not-found)
+-if test $SVNVERSION = found
+-then
+- SVNVERSION="svnversion \$(srcdir)"
+-else
+- SVNVERSION="echo Unversioned directory"
+-fi
+-
+-AC_SUBST(HGVERSION)
+-AC_SUBST(HGTAG)
+-AC_SUBST(HGBRANCH)
+-AC_CHECK_PROG(HAS_HG, hg, found, not-found)
+-if test $HAS_HG = found
+-then
+- HGVERSION="hg id -i \$(srcdir)"
+- HGTAG="hg id -t \$(srcdir)"
+- HGBRANCH="hg id -b \$(srcdir)"
+-else
+- HGVERSION=""
+- HGTAG=""
+- HGBRANCH=""
+-fi
+-
+-case $MACHDEP in
+-bsdos*|hp*|HP*)
+- # install -d does not work on BSDI or HP-UX
+- if test -z "$INSTALL"
+- then
+- INSTALL="${srcdir}/install-sh -c"
+- fi
+-esac
+-AC_PROG_INSTALL
+-
+-# Not every filesystem supports hard links
+-AC_SUBST(LN)
+-if test -z "$LN" ; then
+- case $ac_sys_system in
+- BeOS*) LN="ln -s";;
+- CYGWIN*) LN="ln -s";;
+- atheos*) LN="ln -s";;
+- *) LN=ln;;
+- esac
+-fi
+-
+-# Check for --with-pydebug
+-AC_MSG_CHECKING(for --with-pydebug)
+-AC_ARG_WITH(pydebug,
+- AS_HELP_STRING([--with-pydebug], [build with Py_DEBUG defined]),
+-[
+-if test "$withval" != no
+-then
+- AC_DEFINE(Py_DEBUG, 1,
+- [Define if you want to build an interpreter with many run-time checks.])
+- AC_MSG_RESULT(yes);
+- Py_DEBUG='true'
+-else AC_MSG_RESULT(no); Py_DEBUG='false'
+-fi],
+-[AC_MSG_RESULT(no)])
+-
+-# XXX Shouldn't the code above that fiddles with BASECFLAGS and OPT be
+-# merged with this chunk of code?
+-
+-# Optimizer/debugger flags
+-# ------------------------
+-# (The following bit of code is complicated enough - please keep things
+-# indented properly. Just pretend you're editing Python code. ;-)
+-
+-# There are two parallel sets of case statements below, one that checks to
+-# see if OPT was set and one that does BASECFLAGS setting based upon
+-# compiler and platform. BASECFLAGS tweaks need to be made even if the
+-# user set OPT.
+-
+-# tweak OPT based on compiler and platform, only if the user didn't set
+-# it on the command line
+-AC_SUBST(OPT)
+-if test "${OPT-unset}" = "unset"
+-then
+- case $GCC in
+- yes)
+- if test "$CC" != 'g++' ; then
+- STRICT_PROTO="-Wstrict-prototypes"
+- fi
+- # For gcc 4.x we need to use -fwrapv so let's check if it's supported
+- if "$CC" -v --help 2>/dev/null |grep -- -fwrapv > /dev/null; then
+- WRAP="-fwrapv"
+- fi
+-
+- # Clang also needs -fwrapv
+- case $CC in
+- *clang*) WRAP="-fwrapv"
+- ;;
+- esac
+-
+- case $ac_cv_prog_cc_g in
+- yes)
+- if test "$Py_DEBUG" = 'true' ; then
+- # Optimization messes up debuggers, so turn it off for
+- # debug builds.
+- OPT="-g -O0 -Wall $STRICT_PROTO"
+- else
+- OPT="-g $WRAP -O3 -Wall $STRICT_PROTO"
+- fi
+- ;;
+- *)
+- OPT="-O3 -Wall $STRICT_PROTO"
+- ;;
+- esac
+- case $ac_sys_system in
+- SCO_SV*) OPT="$OPT -m486 -DSCO5"
+- ;;
+- esac
+- ;;
+-
+- *)
+- OPT="-O"
+- ;;
+- esac
+-fi
+-
+-AC_SUBST(BASECFLAGS)
+-
+-# The -arch flags for universal builds on OSX
+-UNIVERSAL_ARCH_FLAGS=
+-AC_SUBST(UNIVERSAL_ARCH_FLAGS)
+-
+-# tweak BASECFLAGS based on compiler and platform
+-case $GCC in
+-yes)
+- # Python violates C99 rules, by casting between incompatible
+- # pointer types. GCC may generate bad code as a result of that,
+- # so use -fno-strict-aliasing if supported.
+- AC_MSG_CHECKING(whether $CC accepts -fno-strict-aliasing)
+- ac_save_cc="$CC"
+- CC="$CC -fno-strict-aliasing"
+- AC_CACHE_VAL(ac_cv_no_strict_aliasing_ok,
+- AC_COMPILE_IFELSE(
+- [AC_LANG_PROGRAM([[]], [[]])],
+- [ac_cv_no_strict_aliasing_ok=yes],
+- [ac_cv_no_strict_aliasing_ok=no]))
+- CC="$ac_save_cc"
+- AC_MSG_RESULT($ac_cv_no_strict_aliasing_ok)
+- if test $ac_cv_no_strict_aliasing_ok = yes
+- then
+- BASECFLAGS="$BASECFLAGS -fno-strict-aliasing"
+- fi
+-
+- # if using gcc on alpha, use -mieee to get (near) full IEEE 754
+- # support. Without this, treatment of subnormals doesn't follow
+- # the standard.
+- case $ac_sys_machine in
+- alpha*)
+- BASECFLAGS="$BASECFLAGS -mieee"
+- ;;
+- esac
+-
+- case $ac_sys_system in
+- SCO_SV*)
+- BASECFLAGS="$BASECFLAGS -m486 -DSCO5"
+- ;;
+- # is there any other compiler on Darwin besides gcc?
+- Darwin*)
+- # -Wno-long-double, -no-cpp-precomp, and -mno-fused-madd
+- # used to be here, but non-Apple gcc doesn't accept them.
+- if test "${CC}" = gcc
+- then
+- AC_MSG_CHECKING(which compiler should be used)
+- case "${UNIVERSALSDK}" in
+- */MacOSX10.4u.sdk)
+- # Build using 10.4 SDK, force usage of gcc when the
+- # compiler is gcc, otherwise the user will get very
+- # confusing error messages when building on OSX 10.6
+- CC=gcc-4.0
+- CPP=cpp-4.0
+- ;;
+- esac
+- AC_MSG_RESULT($CC)
+- fi
+-
+- # Calculate the right deployment target for this build.
+- #
+- cur_target=`sw_vers -productVersion | sed 's/\(10\.[[0-9]]*\).*/\1/'`
+- if test ${cur_target} '>' 10.2; then
+- cur_target=10.3
+- if test ${enable_universalsdk}; then
+- if test "${UNIVERSAL_ARCHS}" = "all"; then
+- # Ensure that the default platform for a
+- # 4-way universal build is OSX 10.5,
+- # that's the first OS release where
+- # 4-way builds make sense.
+- cur_target='10.5'
+-
+- elif test "${UNIVERSAL_ARCHS}" = "3-way"; then
+- cur_target='10.5'
+-
+- elif test "${UNIVERSAL_ARCHS}" = "intel"; then
+- cur_target='10.5'
+-
+- elif test "${UNIVERSAL_ARCHS}" = "64-bit"; then
+- cur_target='10.5'
+- fi
+- else
+- if test `/usr/bin/arch` = "i386"; then
+- # On Intel macs default to a deployment
+- # target of 10.4, that's the first OSX
+- # release with Intel support.
+- cur_target="10.4"
+- fi
+- fi
+- fi
+- CONFIGURE_MACOSX_DEPLOYMENT_TARGET=${MACOSX_DEPLOYMENT_TARGET-${cur_target}}
+-
+- # Make sure that MACOSX_DEPLOYMENT_TARGET is set in the
+- # environment with a value that is the same as what we'll use
+- # in the Makefile to ensure that we'll get the same compiler
+- # environment during configure and build time.
+- MACOSX_DEPLOYMENT_TARGET="$CONFIGURE_MACOSX_DEPLOYMENT_TARGET"
+- export MACOSX_DEPLOYMENT_TARGET
+- EXPORT_MACOSX_DEPLOYMENT_TARGET=''
+-
+- if test "${enable_universalsdk}"; then
+- UNIVERSAL_ARCH_FLAGS=""
+- if test "$UNIVERSAL_ARCHS" = "32-bit" ; then
+- UNIVERSAL_ARCH_FLAGS="-arch ppc -arch i386"
+- ARCH_RUN_32BIT=""
+- LIPO_32BIT_FLAGS=""
+-
+- elif test "$UNIVERSAL_ARCHS" = "64-bit" ; then
+- UNIVERSAL_ARCH_FLAGS="-arch ppc64 -arch x86_64"
+- LIPO_32BIT_FLAGS=""
+- ARCH_RUN_32BIT="true"
+-
+- elif test "$UNIVERSAL_ARCHS" = "all" ; then
+- UNIVERSAL_ARCH_FLAGS="-arch i386 -arch ppc -arch ppc64 -arch x86_64"
+- LIPO_32BIT_FLAGS="-extract ppc7400 -extract i386"
+- ARCH_RUN_32BIT="/usr/bin/arch -i386 -ppc"
+-
+- elif test "$UNIVERSAL_ARCHS" = "intel" ; then
+- UNIVERSAL_ARCH_FLAGS="-arch i386 -arch x86_64"
+- LIPO_32BIT_FLAGS="-extract i386"
+- ARCH_RUN_32BIT="/usr/bin/arch -i386"
+-
+- elif test "$UNIVERSAL_ARCHS" = "3-way" ; then
+- UNIVERSAL_ARCH_FLAGS="-arch i386 -arch ppc -arch x86_64"
+- LIPO_32BIT_FLAGS="-extract ppc7400 -extract i386"
+- ARCH_RUN_32BIT="/usr/bin/arch -i386 -ppc"
+-
+- else
+- AC_MSG_ERROR([proper usage is --with-universal-archs=32-bit|64-bit|all|intel|3-way])
+-
+- fi
+-
+-
+- CFLAGS="${UNIVERSAL_ARCH_FLAGS} ${CFLAGS}"
+- if test "${UNIVERSALSDK}" != "/"
+- then
+- CPPFLAGS="-isysroot ${UNIVERSALSDK} ${CPPFLAGS}"
+- LDFLAGS="-isysroot ${UNIVERSALSDK} ${LDFLAGS}"
+- CFLAGS="-isysroot ${UNIVERSALSDK} ${CFLAGS}"
+- fi
+-
+- fi
+-
+-
+- ;;
+- OSF*)
+- BASECFLAGS="$BASECFLAGS -mieee"
+- ;;
+- esac
+- ;;
+-
+-*)
+- case $ac_sys_system in
+- OpenUNIX*|UnixWare*)
+- BASECFLAGS="$BASECFLAGS -K pentium,host,inline,loop_unroll,alloca "
+- ;;
+- OSF*)
+- BASECFLAGS="$BASECFLAGS -ieee -std"
+- ;;
+- SCO_SV*)
+- BASECFLAGS="$BASECFLAGS -belf -Ki486 -DSCO5"
+- ;;
+- esac
+- ;;
+-esac
+-
+-if test "$Py_DEBUG" = 'true'; then
+- :
+-else
+- OPT="-DNDEBUG $OPT"
+-fi
+-
+-if test "$ac_arch_flags"
+-then
+- BASECFLAGS="$BASECFLAGS $ac_arch_flags"
+-fi
+-
+-# disable check for icc since it seems to pass, but generates a warning
+-if test "$CC" = icc
+-then
+- ac_cv_opt_olimit_ok=no
+-fi
+-
+-AC_MSG_CHECKING(whether $CC accepts -OPT:Olimit=0)
+-AC_CACHE_VAL(ac_cv_opt_olimit_ok,
+-[ac_save_cc="$CC"
+-CC="$CC -OPT:Olimit=0"
+-AC_COMPILE_IFELSE(
+- [AC_LANG_PROGRAM([[]], [[]])],
+- [ac_cv_opt_olimit_ok=yes],
+- [ac_cv_opt_olimit_ok=no]
+- )
+-CC="$ac_save_cc"])
+-AC_MSG_RESULT($ac_cv_opt_olimit_ok)
+-if test $ac_cv_opt_olimit_ok = yes; then
+- case $ac_sys_system in
+- # XXX is this branch needed? On MacOSX 10.2.2 the result of the
+- # olimit_ok test is "no". Is it "yes" in some other Darwin-esque
+- # environment?
+- Darwin*)
+- ;;
+- *)
+- BASECFLAGS="$BASECFLAGS -OPT:Olimit=0"
+- ;;
+- esac
+-else
+- AC_MSG_CHECKING(whether $CC accepts -Olimit 1500)
+- AC_CACHE_VAL(ac_cv_olimit_ok,
+- [ac_save_cc="$CC"
+- CC="$CC -Olimit 1500"
+- AC_COMPILE_IFELSE(
+- [AC_LANG_PROGRAM([[]], [[]])],
+- [ac_cv_olimit_ok=yes],
+- [ac_cv_olimit_ok=no]
+- )
+- CC="$ac_save_cc"])
+- AC_MSG_RESULT($ac_cv_olimit_ok)
+- if test $ac_cv_olimit_ok = yes; then
+- BASECFLAGS="$BASECFLAGS -Olimit 1500"
+- fi
+-fi
+-
+-# Check whether GCC supports PyArg_ParseTuple format
+-if test "$GCC" = "yes"
+-then
+- AC_MSG_CHECKING(whether gcc supports ParseTuple __format__)
+- save_CFLAGS=$CFLAGS
+- CFLAGS="$CFLAGS -Werror"
+- AC_COMPILE_IFELSE([
+- AC_LANG_PROGRAM([[void f(char*,...)__attribute((format(PyArg_ParseTuple, 1, 2)));]], [[]])
+- ],[
+- AC_DEFINE(HAVE_ATTRIBUTE_FORMAT_PARSETUPLE, 1,
+- [Define if GCC supports __attribute__((format(PyArg_ParseTuple, 2, 3)))])
+- AC_MSG_RESULT(yes)
+- ],[
+- AC_MSG_RESULT(no)
+- ])
+- CFLAGS=$save_CFLAGS
+-fi
+-
+-# On some compilers, pthreads are available without further options
+-# (e.g. MacOS X). On some of these systems, the compiler will not
+-# complain if unaccepted options are passed (e.g. gcc on Mac OS X).
+-# So we have to see first whether pthreads are available without
+-# options before we can check whether -Kpthread improves anything.
+-AC_MSG_CHECKING(whether pthreads are available without options)
+-AC_CACHE_VAL(ac_cv_pthread_is_default,
+-[AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <pthread.h>
+-
+-void* routine(void* p){return NULL;}
+-
+-int main(){
+- pthread_t p;
+- if(pthread_create(&p,NULL,routine,NULL)!=0)
+- return 1;
+- (void)pthread_detach(p);
+- return 0;
+-}
+-]])],[
+- ac_cv_pthread_is_default=yes
+- ac_cv_kthread=no
+- ac_cv_pthread=no
+-],[ac_cv_pthread_is_default=no],[ac_cv_pthread_is_default=no])
+-])
+-AC_MSG_RESULT($ac_cv_pthread_is_default)
+-
+-
+-if test $ac_cv_pthread_is_default = yes
+-then
+- ac_cv_kpthread=no
+-else
+-# -Kpthread, if available, provides the right #defines
+-# and linker options to make pthread_create available
+-# Some compilers won't report that they do not support -Kpthread,
+-# so we need to run a program to see whether it really made the
+-# function available.
+-AC_MSG_CHECKING(whether $CC accepts -Kpthread)
+-AC_CACHE_VAL(ac_cv_kpthread,
+-[ac_save_cc="$CC"
+-CC="$CC -Kpthread"
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <pthread.h>
+-
+-void* routine(void* p){return NULL;}
+-
+-int main(){
+- pthread_t p;
+- if(pthread_create(&p,NULL,routine,NULL)!=0)
+- return 1;
+- (void)pthread_detach(p);
+- return 0;
+-}
+-]])],[ac_cv_kpthread=yes],[ac_cv_kpthread=no],[ac_cv_kpthread=no])
+-CC="$ac_save_cc"])
+-AC_MSG_RESULT($ac_cv_kpthread)
+-fi
+-
+-if test $ac_cv_kpthread = no -a $ac_cv_pthread_is_default = no
+-then
+-# -Kthread, if available, provides the right #defines
+-# and linker options to make pthread_create available
+-# Some compilers won't report that they do not support -Kthread,
+-# so we need to run a program to see whether it really made the
+-# function available.
+-AC_MSG_CHECKING(whether $CC accepts -Kthread)
+-AC_CACHE_VAL(ac_cv_kthread,
+-[ac_save_cc="$CC"
+-CC="$CC -Kthread"
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <pthread.h>
+-
+-void* routine(void* p){return NULL;}
+-
+-int main(){
+- pthread_t p;
+- if(pthread_create(&p,NULL,routine,NULL)!=0)
+- return 1;
+- (void)pthread_detach(p);
+- return 0;
+-}
+-]])],[ac_cv_kthread=yes],[ac_cv_kthread=no],[ac_cv_kthread=no])
+-CC="$ac_save_cc"])
+-AC_MSG_RESULT($ac_cv_kthread)
+-fi
+-
+-if test $ac_cv_kthread = no -a $ac_cv_pthread_is_default = no
+-then
+-# -pthread, if available, provides the right #defines
+-# and linker options to make pthread_create available
+-# Some compilers won't report that they do not support -pthread,
+-# so we need to run a program to see whether it really made the
+-# function available.
+-AC_MSG_CHECKING(whether $CC accepts -pthread)
+-AC_CACHE_VAL(ac_cv_thread,
+-[ac_save_cc="$CC"
+-CC="$CC -pthread"
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <pthread.h>
+-
+-void* routine(void* p){return NULL;}
+-
+-int main(){
+- pthread_t p;
+- if(pthread_create(&p,NULL,routine,NULL)!=0)
+- return 1;
+- (void)pthread_detach(p);
+- return 0;
+-}
+-]])],[ac_cv_pthread=yes],[ac_cv_pthread=no],[ac_cv_pthread=no])
+-CC="$ac_save_cc"])
+-AC_MSG_RESULT($ac_cv_pthread)
+-fi
+-
+-# If we have set a CC compiler flag for thread support then
+-# check if it works for CXX, too.
+-ac_cv_cxx_thread=no
+-if test ! -z "$CXX"
+-then
+-AC_MSG_CHECKING(whether $CXX also accepts flags for thread support)
+-ac_save_cxx="$CXX"
+-
+-if test "$ac_cv_kpthread" = "yes"
+-then
+- CXX="$CXX -Kpthread"
+- ac_cv_cxx_thread=yes
+-elif test "$ac_cv_kthread" = "yes"
+-then
+- CXX="$CXX -Kthread"
+- ac_cv_cxx_thread=yes
+-elif test "$ac_cv_pthread" = "yes"
+-then
+- CXX="$CXX -pthread"
+- ac_cv_cxx_thread=yes
+-fi
+-
+-if test $ac_cv_cxx_thread = yes
+-then
+- echo 'void foo();int main(){foo();}void foo(){}' > conftest.$ac_ext
+- $CXX -c conftest.$ac_ext 2>&5
+- if $CXX -o conftest$ac_exeext conftest.$ac_objext 2>&5 \
+- && test -s conftest$ac_exeext && ./conftest$ac_exeext
+- then
+- ac_cv_cxx_thread=yes
+- else
+- ac_cv_cxx_thread=no
+- fi
+- rm -fr conftest*
+-fi
+-AC_MSG_RESULT($ac_cv_cxx_thread)
+-fi
+-CXX="$ac_save_cxx"
+-
+-dnl # check for ANSI or K&R ("traditional") preprocessor
+-dnl AC_MSG_CHECKING(for C preprocessor type)
+-dnl AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-dnl #define spam(name, doc) {#name, &name, #name "() -- " doc}
+-dnl int foo;
+-dnl struct {char *name; int *addr; char *doc;} desc = spam(foo, "something");
+-dnl ]], [[;]])],[cpp_type=ansi],[AC_DEFINE(HAVE_OLD_CPP) cpp_type=traditional])
+-dnl AC_MSG_RESULT($cpp_type)
+-
+-# checks for header files
+-AC_HEADER_STDC
+-AC_CHECK_HEADERS(asm/types.h conio.h curses.h direct.h dlfcn.h errno.h \
+-fcntl.h grp.h \
+-ieeefp.h io.h langinfo.h libintl.h ncurses.h poll.h process.h pthread.h \
+-shadow.h signal.h stdint.h stropts.h termios.h thread.h \
+-unistd.h utime.h \
+-sys/audioio.h sys/bsdtty.h sys/epoll.h sys/event.h sys/file.h sys/loadavg.h \
+-sys/lock.h sys/mkdev.h sys/modem.h \
+-sys/param.h sys/poll.h sys/select.h sys/socket.h sys/statvfs.h sys/stat.h \
+-sys/termio.h sys/time.h \
+-sys/times.h sys/types.h sys/un.h sys/utsname.h sys/wait.h pty.h libutil.h \
+-sys/resource.h netpacket/packet.h sysexits.h bluetooth.h \
+-bluetooth/bluetooth.h linux/tipc.h spawn.h util.h)
+-AC_HEADER_DIRENT
+-AC_HEADER_MAJOR
+-
+-# On Solaris, term.h requires curses.h
+-AC_CHECK_HEADERS(term.h,,,[
+-#ifdef HAVE_CURSES_H
+-#include <curses.h>
+-#endif
+-])
+-
+-# On Linux, netlink.h requires asm/types.h
+-AC_CHECK_HEADERS(linux/netlink.h,,,[
+-#ifdef HAVE_ASM_TYPES_H
+-#include <asm/types.h>
+-#endif
+-#ifdef HAVE_SYS_SOCKET_H
+-#include <sys/socket.h>
+-#endif
+-])
+-
+-# checks for typedefs
+-was_it_defined=no
+-AC_MSG_CHECKING(for clock_t in time.h)
+-AC_EGREP_HEADER(clock_t, time.h, was_it_defined=yes, [
+- AC_DEFINE(clock_t, long, [Define to 'long' if <time.h> doesn't define.])
+-])
+-AC_MSG_RESULT($was_it_defined)
+-
+-# Check whether using makedev requires defining _OSF_SOURCE
+-AC_MSG_CHECKING(for makedev)
+-AC_LINK_IFELSE([AC_LANG_PROGRAM([[
+-#if defined(MAJOR_IN_MKDEV)
+-#include <sys/mkdev.h>
+-#elif defined(MAJOR_IN_SYSMACROS)
+-#include <sys/sysmacros.h>
+-#else
+-#include <sys/types.h>
+-#endif ]], [[ makedev(0, 0) ]])],
+-[ac_cv_has_makedev=yes],
+-[ac_cv_has_makedev=no])
+-if test "$ac_cv_has_makedev" = "no"; then
+- # we didn't link, try if _OSF_SOURCE will allow us to link
+- AC_LINK_IFELSE([AC_LANG_PROGRAM([[
+-#define _OSF_SOURCE 1
+-#include <sys/types.h>
+- ]], [[ makedev(0, 0) ]])],
+-[ac_cv_has_makedev=yes],
+-[ac_cv_has_makedev=no])
+- if test "$ac_cv_has_makedev" = "yes"; then
+- AC_DEFINE(_OSF_SOURCE, 1, [Define _OSF_SOURCE to get the makedev macro.])
+- fi
+-fi
+-AC_MSG_RESULT($ac_cv_has_makedev)
+-if test "$ac_cv_has_makedev" = "yes"; then
+- AC_DEFINE(HAVE_MAKEDEV, 1, [Define this if you have the makedev macro.])
+-fi
+-
+-# Enabling LFS on Solaris (2.6 to 9) with gcc 2.95 triggers a bug in
+-# the system headers: If _XOPEN_SOURCE and _LARGEFILE_SOURCE are
+-# defined, but the compiler does not support pragma redefine_extname,
+-# and _LARGEFILE64_SOURCE is not defined, the headers refer to 64-bit
+-# structures (such as rlimit64) without declaring them. As a
+-# work-around, disable LFS on such configurations
+-
+-use_lfs=yes
+-AC_MSG_CHECKING(Solaris LFS bug)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-#define _LARGEFILE_SOURCE 1
+-#define _FILE_OFFSET_BITS 64
+-#include <sys/resource.h>
+-]], [[struct rlimit foo;]])],[sol_lfs_bug=no],[sol_lfs_bug=yes])
+-AC_MSG_RESULT($sol_lfs_bug)
+-if test "$sol_lfs_bug" = "yes"; then
+- use_lfs=no
+-fi
+-
+-if test "$use_lfs" = "yes"; then
+-# Two defines needed to enable largefile support on various platforms
+-# These may affect some typedefs
+-case $ac_sys_system/$ac_sys_release in
+-AIX*)
+- AC_DEFINE(_LARGE_FILES, 1,
+- [This must be defined on AIX systems to enable large file support.])
+- ;;
+-esac
+-AC_DEFINE(_LARGEFILE_SOURCE, 1,
+-[This must be defined on some systems to enable large file support.])
+-AC_DEFINE(_FILE_OFFSET_BITS, 64,
+-[This must be set to 64 on some systems to enable large file support.])
+-fi
+-
+-# Add some code to confdefs.h so that the test for off_t works on SCO
+-cat >> confdefs.h <<\EOF
+-#if defined(SCO_DS)
+-#undef _OFF_T
+-#endif
+-EOF
+-
+-# Type availability checks
+-AC_TYPE_MODE_T
+-AC_TYPE_OFF_T
+-AC_TYPE_PID_T
+-AC_DEFINE_UNQUOTED([RETSIGTYPE],[void],[assume C89 semantics that RETSIGTYPE is always void])
+-AC_TYPE_SIZE_T
+-AC_TYPE_UID_T
+-AC_TYPE_UINT32_T
+-AC_TYPE_UINT64_T
+-AC_TYPE_INT32_T
+-AC_TYPE_INT64_T
+-AC_CHECK_TYPE(ssize_t,
+- AC_DEFINE(HAVE_SSIZE_T, 1, [Define if your compiler provides ssize_t]),,)
+-
+-# Sizes of various common basic types
+-# ANSI C requires sizeof(char) == 1, so no need to check it
+-AC_CHECK_SIZEOF(int, 4)
+-AC_CHECK_SIZEOF(long, 4)
+-AC_CHECK_SIZEOF(void *, 4)
+-AC_CHECK_SIZEOF(short, 2)
+-AC_CHECK_SIZEOF(float, 4)
+-AC_CHECK_SIZEOF(double, 8)
+-AC_CHECK_SIZEOF(fpos_t, 4)
+-AC_CHECK_SIZEOF(size_t, 4)
+-AC_CHECK_SIZEOF(pid_t, 4)
+-
+-AC_MSG_CHECKING(for long long support)
+-have_long_long=no
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[long long x; x = (long long)0;]])],[
+- AC_DEFINE(HAVE_LONG_LONG, 1, [Define this if you have the type long long.])
+- have_long_long=yes
+-],[])
+-AC_MSG_RESULT($have_long_long)
+-if test "$have_long_long" = yes ; then
+-AC_CHECK_SIZEOF(long long, 8)
+-fi
+-
+-AC_MSG_CHECKING(for long double support)
+-have_long_double=no
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[long double x; x = (long double)0;]])],[
+- AC_DEFINE(HAVE_LONG_DOUBLE, 1, [Define this if you have the type long double.])
+- have_long_double=yes
+-],[])
+-AC_MSG_RESULT($have_long_double)
+-if test "$have_long_double" = yes ; then
+-AC_CHECK_SIZEOF(long double, 12)
+-fi
+-
+-AC_MSG_CHECKING(for _Bool support)
+-have_c99_bool=no
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[_Bool x; x = (_Bool)0;]])],[
+- AC_DEFINE(HAVE_C99_BOOL, 1, [Define this if you have the type _Bool.])
+- have_c99_bool=yes
+-],[])
+-AC_MSG_RESULT($have_c99_bool)
+-if test "$have_c99_bool" = yes ; then
+-AC_CHECK_SIZEOF(_Bool, 1)
+-fi
+-
+-AC_CHECK_TYPES(uintptr_t,
+- [AC_CHECK_SIZEOF(uintptr_t, 4)],
+- [], [#ifdef HAVE_STDINT_H
+- #include <stdint.h>
+- #endif
+- #ifdef HAVE_INTTYPES_H
+- #include <inttypes.h>
+- #endif])
+-
+-AC_CHECK_SIZEOF(off_t, [], [
+-#ifdef HAVE_SYS_TYPES_H
+-#include <sys/types.h>
+-#endif
+-])
+-
+-AC_MSG_CHECKING(whether to enable large file support)
+-if test "$have_long_long" = yes
+-then
+-if test "$ac_cv_sizeof_off_t" -gt "$ac_cv_sizeof_long" -a \
+- "$ac_cv_sizeof_long_long" -ge "$ac_cv_sizeof_off_t"; then
+- AC_DEFINE(HAVE_LARGEFILE_SUPPORT, 1,
+- [Defined to enable large file support when an off_t is bigger than a long
+- and long long is available and at least as big as an off_t. You may need
+- to add some flags for configuration and compilation to enable this mode.
+- (For Solaris and Linux, the necessary defines are already defined.)])
+- AC_MSG_RESULT(yes)
+-else
+- AC_MSG_RESULT(no)
+-fi
+-else
+- AC_MSG_RESULT(no)
+-fi
+-
+-AC_CHECK_SIZEOF(time_t, [], [
+-#ifdef HAVE_SYS_TYPES_H
+-#include <sys/types.h>
+-#endif
+-#ifdef HAVE_TIME_H
+-#include <time.h>
+-#endif
+-])
+-
+-# if have pthread_t then define SIZEOF_PTHREAD_T
+-ac_save_cc="$CC"
+-if test "$ac_cv_kpthread" = "yes"
+-then CC="$CC -Kpthread"
+-elif test "$ac_cv_kthread" = "yes"
+-then CC="$CC -Kthread"
+-elif test "$ac_cv_pthread" = "yes"
+-then CC="$CC -pthread"
+-fi
+-AC_MSG_CHECKING(for pthread_t)
+-have_pthread_t=no
+-AC_COMPILE_IFELSE([
+- AC_LANG_PROGRAM([[#include <pthread.h>]], [[pthread_t x; x = *(pthread_t*)0;]])
+-],[have_pthread_t=yes],[])
+-AC_MSG_RESULT($have_pthread_t)
+-if test "$have_pthread_t" = yes ; then
+- AC_CHECK_SIZEOF(pthread_t, [], [
+-#ifdef HAVE_PTHREAD_H
+-#include <pthread.h>
+-#endif
+- ])
+-fi
+-CC="$ac_save_cc"
+-
+-AC_MSG_CHECKING(for --enable-toolbox-glue)
+-AC_ARG_ENABLE(toolbox-glue,
+- AS_HELP_STRING([--enable-toolbox-glue], [disable/enable MacOSX glue code for extensions]))
+-
+-if test -z "$enable_toolbox_glue"
+-then
+- case $ac_sys_system/$ac_sys_release in
+- Darwin/*)
+- enable_toolbox_glue="yes";;
+- *)
+- enable_toolbox_glue="no";;
+- esac
+-fi
+-case "$enable_toolbox_glue" in
+-yes)
+- extra_machdep_objs="Python/mactoolboxglue.o"
+- extra_undefs="-u _PyMac_Error"
+- AC_DEFINE(USE_TOOLBOX_OBJECT_GLUE, 1,
+- [Define if you want to use MacPython modules on MacOSX in unix-Python.])
+- ;;
+-*)
+- extra_machdep_objs=""
+- extra_undefs=""
+- ;;
+-esac
+-AC_MSG_RESULT($enable_toolbox_glue)
+-
+-
+-AC_SUBST(OTHER_LIBTOOL_OPT)
+-case $ac_sys_system/$ac_sys_release in
+- Darwin/@<:@01567@:>@\..*)
+- OTHER_LIBTOOL_OPT="-prebind -seg1addr 0x10000000"
+- ;;
+- Darwin/*)
+- OTHER_LIBTOOL_OPT=""
+- ;;
+-esac
+-
+-
+-ARCH_RUN_32BIT=""
+-AC_SUBST(LIBTOOL_CRUFT)
+-case $ac_sys_system/$ac_sys_release in
+- Darwin/@<:@01567@:>@\..*)
+- LIBTOOL_CRUFT="-framework System -lcc_dynamic"
+- if test "${enable_universalsdk}"; then
+- :
+- else
+- LIBTOOL_CRUFT="${LIBTOOL_CRUFT} -arch_only `/usr/bin/arch`"
+- fi
+- LIBTOOL_CRUFT=$LIBTOOL_CRUFT' -install_name $(PYTHONFRAMEWORKINSTALLDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
+- LIBTOOL_CRUFT=$LIBTOOL_CRUFT' -compatibility_version $(VERSION) -current_version $(VERSION)';;
+- Darwin/*)
+- gcc_version=`gcc -dumpversion`
+- if test ${gcc_version} '<' 4.0
+- then
+- LIBTOOL_CRUFT="-lcc_dynamic"
+- else
+- LIBTOOL_CRUFT=""
+- fi
+- AC_RUN_IFELSE([AC_LANG_SOURCE([[
+- #include <unistd.h>
+- int main(int argc, char*argv[])
+- {
+- if (sizeof(long) == 4) {
+- return 0;
+- } else {
+- return 1;
+- }
+- }
+- ]])],[ac_osx_32bit=yes],[ac_osx_32bit=no],[ac_osx_32bit=yes])
+-
+- if test "${ac_osx_32bit}" = "yes"; then
+- case `/usr/bin/arch` in
+- i386)
+- MACOSX_DEFAULT_ARCH="i386"
+- ;;
+- ppc)
+- MACOSX_DEFAULT_ARCH="ppc"
+- ;;
+- *)
+- AC_MSG_ERROR([Unexpected output of 'arch' on OSX])
+- ;;
+- esac
+- else
+- case `/usr/bin/arch` in
+- i386)
+- MACOSX_DEFAULT_ARCH="x86_64"
+- ;;
+- ppc)
+- MACOSX_DEFAULT_ARCH="ppc64"
+- ;;
+- *)
+- AC_MSG_ERROR([Unexpected output of 'arch' on OSX])
+- ;;
+- esac
+-
+- #ARCH_RUN_32BIT="true"
+- fi
+-
+- LIBTOOL_CRUFT=$LIBTOOL_CRUFT" -lSystem -lSystemStubs -arch_only ${MACOSX_DEFAULT_ARCH}"
+- LIBTOOL_CRUFT=$LIBTOOL_CRUFT' -install_name $(PYTHONFRAMEWORKINSTALLDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
+- LIBTOOL_CRUFT=$LIBTOOL_CRUFT' -compatibility_version $(VERSION) -current_version $(VERSION)';;
+-esac
+-
+-AC_MSG_CHECKING(for --enable-framework)
+-if test "$enable_framework"
+-then
+- BASECFLAGS="$BASECFLAGS -fno-common -dynamic"
+- # -F. is needed to allow linking to the framework while
+- # in the build location.
+- AC_DEFINE(WITH_NEXT_FRAMEWORK, 1,
+- [Define if you want to produce an OpenStep/Rhapsody framework
+- (shared library plus accessory files).])
+- AC_MSG_RESULT(yes)
+- if test $enable_shared = "yes"
+- then
+- AC_MSG_ERROR([Specifying both --enable-shared and --enable-framework is not supported, use only --enable-framework instead. See Mac/README.])
+- fi
+-else
+- AC_MSG_RESULT(no)
+-fi
+-
+-AC_MSG_CHECKING(for dyld)
+-case $ac_sys_system/$ac_sys_release in
+- Darwin/*)
+- AC_DEFINE(WITH_DYLD, 1,
+- [Define if you want to use the new-style (Openstep, Rhapsody, MacOS)
+- dynamic linker (dyld) instead of the old-style (NextStep) dynamic
+- linker (rld). Dyld is necessary to support frameworks.])
+- AC_MSG_RESULT(always on for Darwin)
+- ;;
+- *)
+- AC_MSG_RESULT(no)
+- ;;
+-esac
+-
+-# Set info about shared libraries.
+-AC_SUBST(SO)
+-AC_SUBST(LDSHARED)
+-AC_SUBST(LDCXXSHARED)
+-AC_SUBST(BLDSHARED)
+-AC_SUBST(CCSHARED)
+-AC_SUBST(LINKFORSHARED)
+-# SO is the extension of shared libraries (including the dot!)
+-# -- usually .so, .sl on HP-UX, .dll on Cygwin
+-AC_MSG_CHECKING(SO)
+-if test -z "$SO"
+-then
+- case $ac_sys_system in
+- hp*|HP*)
+- case `uname -m` in
+- ia64) SO=.so;;
+- *) SO=.sl;;
+- esac
+- ;;
+- CYGWIN*) SO=.dll;;
+- *) SO=.so;;
+- esac
+-else
+- # this might also be a termcap variable, see #610332
+- echo
+- echo '====================================================================='
+- echo '+ +'
+- echo '+ WARNING: You have set SO in your environment. +'
+- echo '+ Do you really mean to change the extension for shared libraries? +'
+- echo '+ Continuing in 10 seconds to let you to ponder. +'
+- echo '+ +'
+- echo '====================================================================='
+- sleep 10
+-fi
+-AC_MSG_RESULT($SO)
+-
+-AC_DEFINE_UNQUOTED(SHLIB_EXT, "$SO", [Define this to be extension of shared libraries (including the dot!).])
+-# LDSHARED is the ld *command* used to create shared library
+-# -- "cc -G" on SunOS 5.x, "ld -shared" on IRIX 5
+-# (Shared libraries in this instance are shared modules to be loaded into
+-# Python, as opposed to building Python itself as a shared library.)
+-AC_MSG_CHECKING(LDSHARED)
+-if test -z "$LDSHARED"
+-then
+- case $ac_sys_system/$ac_sys_release in
+- AIX*)
+- BLDSHARED="\$(srcdir)/Modules/ld_so_aix \$(CC) -bI:\$(srcdir)/Modules/python.exp"
+- LDSHARED="\$(BINLIBDEST)/config/ld_so_aix \$(CC) -bI:\$(BINLIBDEST)/config/python.exp"
+- ;;
+- BeOS*)
+- BLDSHARED="\$(srcdir)/Modules/ld_so_beos $LDLIBRARY"
+- LDSHARED="\$(BINLIBDEST)/config/ld_so_beos \$(LIBDIR)/$LDLIBRARY"
+- ;;
+- IRIX/5*) LDSHARED="ld -shared";;
+- IRIX*/6*) LDSHARED="ld ${SGI_ABI} -shared -all";;
+- SunOS/5*)
+- if test "$GCC" = "yes" ; then
+- LDSHARED='$(CC) -shared'
+- LDCXXSHARED='$(CXX) -shared'
+- else
+- LDSHARED='$(CC) -G'
+- LDCXXSHARED='$(CXX) -G'
+- fi ;;
+- hp*|HP*)
+- if test "$GCC" = "yes" ; then
+- LDSHARED='$(CC) -shared'
+- LDCXXSHARED='$(CXX) -shared'
+- else
+- LDSHARED='ld -b'
+- fi ;;
+- OSF*) LDSHARED="ld -shared -expect_unresolved \"*\"";;
+- Darwin/1.3*)
+- LDSHARED='$(CC) -bundle'
+- LDCXXSHARED='$(CXX) -bundle'
+- if test "$enable_framework" ; then
+- # Link against the framework. All externals should be defined.
+- BLDSHARED="$LDSHARED "'$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
+- LDSHARED="$LDSHARED "'$(PYTHONFRAMEWORKPREFIX)/$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
+- LDCXXSHARED="$LDCXXSHARED "'$(PYTHONFRAMEWORKPREFIX)/$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
+- else
+- # No framework. Ignore undefined symbols, assuming they come from Python
+- LDSHARED="$LDSHARED -undefined suppress"
+- LDCXXSHARED="$LDCXXSHARED -undefined suppress"
+- fi ;;
+- Darwin/1.4*|Darwin/5.*|Darwin/6.*)
+- LDSHARED='$(CC) -bundle'
+- LDCXXSHARED='$(CXX) -bundle'
+- if test "$enable_framework" ; then
+- # Link against the framework. All externals should be defined.
+- BLDSHARED="$LDSHARED "'$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
+- LDSHARED="$LDSHARED "'$(PYTHONFRAMEWORKPREFIX)/$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
+- LDCXXSHARED="$LDCXXSHARED "'$(PYTHONFRAMEWORKPREFIX)/$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
+- else
+- # No framework, use the Python app as bundle-loader
+- BLDSHARED="$LDSHARED "'-bundle_loader $(BUILDPYTHON)'
+- LDSHARED="$LDSHARED "'-bundle_loader $(BINDIR)/python$(VERSION)$(EXE)'
+- LDCXXSHARED="$LDCXXSHARED "'-bundle_loader $(BINDIR)/python$(VERSION)$(EXE)'
+- fi ;;
+- Darwin/*)
+- # Use -undefined dynamic_lookup whenever possible (10.3 and later).
+- # This allows an extension to be used in any Python
+-
+- if test ${MACOSX_DEPLOYMENT_TARGET} '>' 10.2
+- then
+- if test "${enable_universalsdk}"; then
+- LDFLAGS="${UNIVERSAL_ARCH_FLAGS} -isysroot ${UNIVERSALSDK} ${LDFLAGS}"
+- fi
+- LDSHARED='$(CC) -bundle -undefined dynamic_lookup'
+- LDCXXSHARED='$(CXX) -bundle -undefined dynamic_lookup'
+- BLDSHARED="$LDSHARED"
+- else
+- LDSHARED='$(CC) -bundle'
+- LDCXXSHARED='$(CXX) -bundle'
+- if test "$enable_framework" ; then
+- # Link against the framework. All externals should be defined.
+- BLDSHARED="$LDSHARED "'$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
+- LDSHARED="$LDSHARED "'$(PYTHONFRAMEWORKPREFIX)/$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
+- LDCXXSHARED="$LDCXXSHARED "'$(PYTHONFRAMEWORKPREFIX)/$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
+- else
+- # No framework, use the Python app as bundle-loader
+- BLDSHARED="$LDSHARED "'-bundle_loader $(BUILDPYTHON)'
+- LDSHARED="$LDSHARED "'-bundle_loader $(BINDIR)/python$(VERSION)$(EXE)'
+- LDCXXSHARED="$LDCXXSHARED "'-bundle_loader $(BINDIR)/python$(VERSION)$(EXE)'
+- fi
+- fi
+- ;;
+- Linux*|GNU*|QNX*)
+- LDSHARED='$(CC) -shared'
+- LDCXXSHARED='$(CXX) -shared';;
+- BSD/OS*/4*)
+- LDSHARED="gcc -shared"
+- LDCXXSHARED="g++ -shared";;
+- FreeBSD*)
+- if [[ "`$CC -dM -E - </dev/null | grep __ELF__`" != "" ]]
+- then
+- LDSHARED='$(CC) -shared'
+- LDCXXSHARED='$(CXX) -shared'
+- else
+- LDSHARED="ld -Bshareable"
+- fi;;
+- OpenBSD*)
+- if [[ "`$CC -dM -E - </dev/null | grep __ELF__`" != "" ]]
+- then
+- LDSHARED='$(CC) -shared $(CCSHARED)'
+- LDCXXSHARED='$(CXX) -shared $(CCSHARED)'
+- else
+- case `uname -r` in
+- [[01]].* | 2.[[0-7]] | 2.[[0-7]].*)
+- LDSHARED="ld -Bshareable ${LDFLAGS}"
+- ;;
+- *)
+- LDSHARED='$(CC) -shared $(CCSHARED)'
+- LDCXXSHARED='$(CXX) -shared $(CCSHARED)'
+- ;;
+- esac
+- fi;;
+- NetBSD*|DragonFly*)
+- LDSHARED='$(CC) -shared'
+- LDCXXSHARED='$(CXX) -shared';;
+- OpenUNIX*|UnixWare*)
+- if test "$GCC" = "yes" ; then
+- LDSHARED='$(CC) -shared'
+- LDCXXSHARED='$(CXX) -shared'
+- else
+- LDSHARED='$(CC) -G'
+- LDCXXSHARED='$(CXX) -G'
+- fi;;
+- SCO_SV*)
+- LDSHARED='$(CC) -Wl,-G,-Bexport'
+- LDCXXSHARED='$(CXX) -Wl,-G,-Bexport';;
+- CYGWIN*)
+- LDSHARED="gcc -shared -Wl,--enable-auto-image-base"
+- LDCXXSHARED="g++ -shared -Wl,--enable-auto-image-base";;
+- atheos*)
+- LDSHARED="gcc -shared"
+- LDCXXSHARED="g++ -shared";;
+- *) LDSHARED="ld";;
+- esac
+-fi
+-AC_MSG_RESULT($LDSHARED)
+-LDCXXSHARED=${LDCXXSHARED-$LDSHARED}
+-BLDSHARED=${BLDSHARED-$LDSHARED}
+-# CCSHARED are the C *flags* used to create objects to go into a shared
+-# library (module) -- this is only needed for a few systems
+-AC_MSG_CHECKING(CCSHARED)
+-if test -z "$CCSHARED"
+-then
+- case $ac_sys_system/$ac_sys_release in
+- SunOS*) if test "$GCC" = yes;
+- then CCSHARED="-fPIC";
+- elif test `uname -p` = sparc;
+- then CCSHARED="-xcode=pic32";
+- else CCSHARED="-Kpic";
+- fi;;
+- hp*|HP*) if test "$GCC" = yes;
+- then CCSHARED="-fPIC";
+- else CCSHARED="+z";
+- fi;;
+- Linux*|GNU*) CCSHARED="-fPIC";;
+- BSD/OS*/4*) CCSHARED="-fpic";;
+- FreeBSD*|NetBSD*|OpenBSD*|DragonFly*) CCSHARED="-fPIC";;
+- OpenUNIX*|UnixWare*)
+- if test "$GCC" = "yes"
+- then CCSHARED="-fPIC"
+- else CCSHARED="-KPIC"
+- fi;;
+- SCO_SV*)
+- if test "$GCC" = "yes"
+- then CCSHARED="-fPIC"
+- else CCSHARED="-Kpic -belf"
+- fi;;
+- IRIX*/6*) case $CC in
+- *gcc*) CCSHARED="-shared";;
+- *) CCSHARED="";;
+- esac;;
+- atheos*) CCSHARED="-fPIC";;
+- esac
+-fi
+-AC_MSG_RESULT($CCSHARED)
+-# LINKFORSHARED are the flags passed to the $(CC) command that links
+-# the python executable -- this is only needed for a few systems
+-AC_MSG_CHECKING(LINKFORSHARED)
+-if test -z "$LINKFORSHARED"
+-then
+- case $ac_sys_system/$ac_sys_release in
+- AIX*) LINKFORSHARED='-Wl,-bE:Modules/python.exp -lld';;
+- hp*|HP*)
+- LINKFORSHARED="-Wl,-E -Wl,+s";;
+-# LINKFORSHARED="-Wl,-E -Wl,+s -Wl,+b\$(BINLIBDEST)/lib-dynload";;
+- BSD/OS/4*) LINKFORSHARED="-Xlinker -export-dynamic";;
+- Linux*|GNU*) LINKFORSHARED="-Xlinker -export-dynamic";;
+- # -u libsys_s pulls in all symbols in libsys
+- Darwin/*)
+- # -u _PyMac_Error is needed to pull in the mac toolbox glue,
+- # which is
+- # not used by the core itself but which needs to be in the core so
+- # that dynamically loaded extension modules have access to it.
+- # -prebind is no longer used, because it actually seems to give a
+- # slowdown instead of a speedup, maybe due to the large number of
+- # dynamic loads Python does.
+-
+- LINKFORSHARED="$extra_undefs"
+- if test "$enable_framework"
+- then
+- LINKFORSHARED="$LINKFORSHARED "'$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
+- fi
+- LINKFORSHARED="$LINKFORSHARED";;
+- OpenUNIX*|UnixWare*) LINKFORSHARED="-Wl,-Bexport";;
+- SCO_SV*) LINKFORSHARED="-Wl,-Bexport";;
+- ReliantUNIX*) LINKFORSHARED="-W1 -Blargedynsym";;
+- FreeBSD*|NetBSD*|OpenBSD*|DragonFly*)
+- if [[ "`$CC -dM -E - </dev/null | grep __ELF__`" != "" ]]
+- then
+- LINKFORSHARED="-Wl,--export-dynamic"
+- fi;;
+- SunOS/5*) case $CC in
+- *gcc*)
+- if $CC -Xlinker --help 2>&1 | grep export-dynamic >/dev/null
+- then
+- LINKFORSHARED="-Xlinker --export-dynamic"
+- fi;;
+- esac;;
+- CYGWIN*)
+- if test $enable_shared = "no"
+- then
+- LINKFORSHARED='-Wl,--out-implib=$(LDLIBRARY)'
+- fi;;
+- QNX*)
+- # -Wl,-E causes the symbols to be added to the dynamic
+- # symbol table so that they can be found when a module
+- # is loaded. -N 2048K causes the stack size to be set
+- # to 2048 kilobytes so that the stack doesn't overflow
+- # when running test_compile.py.
+- LINKFORSHARED='-Wl,-E -N 2048K';;
+- esac
+-fi
+-AC_MSG_RESULT($LINKFORSHARED)
+-
+-
+-AC_SUBST(CFLAGSFORSHARED)
+-AC_MSG_CHECKING(CFLAGSFORSHARED)
+-if test ! "$LIBRARY" = "$LDLIBRARY"
+-then
+- case $ac_sys_system in
+- CYGWIN*)
+- # Cygwin needs CCSHARED when building extension DLLs
+- # but not when building the interpreter DLL.
+- CFLAGSFORSHARED='';;
+- *)
+- CFLAGSFORSHARED='$(CCSHARED)'
+- esac
+-fi
+-AC_MSG_RESULT($CFLAGSFORSHARED)
+-
+-# SHLIBS are libraries (except -lc and -lm) to link to the python shared
+-# library (with --enable-shared).
+-# For platforms on which shared libraries are not allowed to have unresolved
+-# symbols, this must be set to $(LIBS) (expanded by make). We do this even
+-# if it is not required, since it creates a dependency of the shared library
+-# to LIBS. This, in turn, means that applications linking the shared libpython
+-# don't need to link LIBS explicitly. The default should be only changed
+-# on systems where this approach causes problems.
+-AC_SUBST(SHLIBS)
+-AC_MSG_CHECKING(SHLIBS)
+-case "$ac_sys_system" in
+- *)
+- SHLIBS='$(LIBS)';;
+-esac
+-AC_MSG_RESULT($SHLIBS)
+-
+-
+-# checks for libraries
+-AC_CHECK_LIB(dl, dlopen) # Dynamic linking for SunOS/Solaris and SYSV
+-AC_CHECK_LIB(dld, shl_load) # Dynamic linking for HP-UX
+-
+-# only check for sem_init if thread support is requested
+-if test "$with_threads" = "yes" -o -z "$with_threads"; then
+- AC_SEARCH_LIBS(sem_init, pthread rt posix4) # 'Real Time' functions on Solaris
+- # posix4 on Solaris 2.6
+- # pthread (first!) on Linux
+-fi
+-
+-# check if we need libintl for locale functions
+-AC_CHECK_LIB(intl, textdomain,
+- AC_DEFINE(WITH_LIBINTL, 1,
+- [Define to 1 if libintl is needed for locale functions.]))
+-
+-# checks for system dependent C++ extensions support
+-case "$ac_sys_system" in
+- AIX*) AC_MSG_CHECKING(for genuine AIX C++ extensions support)
+- AC_LINK_IFELSE([
+- AC_LANG_PROGRAM([[#include <load.h>]],
+- [[loadAndInit("", 0, "")]])
+- ],[
+- AC_DEFINE(AIX_GENUINE_CPLUSPLUS, 1,
+- [Define for AIX if your compiler is a genuine IBM xlC/xlC_r
+- and you want support for AIX C++ shared extension modules.])
+- AC_MSG_RESULT(yes)
+- ],[
+- AC_MSG_RESULT(no)
+- ]);;
+- *) ;;
+-esac
+-
+-# Most SVR4 platforms (e.g. Solaris) need -lsocket and -lnsl.
+-# BeOS' sockets are stashed in libnet.
+-AC_CHECK_LIB(nsl, t_open, [LIBS="-lnsl $LIBS"]) # SVR4
+-AC_CHECK_LIB(socket, socket, [LIBS="-lsocket $LIBS"], [], $LIBS) # SVR4 sockets
+-
+-case "$ac_sys_system" in
+-BeOS*)
+-AC_CHECK_LIB(net, socket, [LIBS="-lnet $LIBS"], [], $LIBS) # BeOS
+-;;
+-esac
+-
+-AC_MSG_CHECKING(for --with-libs)
+-AC_ARG_WITH(libs,
+- AS_HELP_STRING([--with-libs='lib1 ...'], [link against additional libs]),
+-[
+-AC_MSG_RESULT($withval)
+-LIBS="$withval $LIBS"
+-],
+-[AC_MSG_RESULT(no)])
+-
+-AC_PATH_TOOL([PKG_CONFIG], [pkg-config])
+-
+-# Check for use of the system expat library
+-AC_MSG_CHECKING(for --with-system-expat)
+-AC_ARG_WITH(system_expat,
+- AS_HELP_STRING([--with-system-expat], [build pyexpat module using an installed expat library]),
+- [],
+- [with_system_expat="no"])
+-
+-AC_MSG_RESULT($with_system_expat)
+-
+-# Check for use of the system libffi library
+-AC_MSG_CHECKING(for --with-system-ffi)
+-AC_ARG_WITH(system_ffi,
+- AS_HELP_STRING([--with-system-ffi], [build _ctypes module using an installed ffi library]),
+- [],
+- [with_system_ffi="no"])
+-
+-if test "$with_system_ffi" = "yes" && test -n "$PKG_CONFIG"; then
+- LIBFFI_INCLUDEDIR="`"$PKG_CONFIG" libffi --cflags-only-I 2>/dev/null | sed -e 's/^-I//;s/ *$//'`"
+-else
+- LIBFFI_INCLUDEDIR=""
+-fi
+-AC_SUBST(LIBFFI_INCLUDEDIR)
+-
+-AC_MSG_RESULT($with_system_ffi)
+-
+-# Check for --with-dbmliborder
+-AC_MSG_CHECKING(for --with-dbmliborder)
+-AC_ARG_WITH(dbmliborder,
+- AS_HELP_STRING([--with-dbmliborder=db1:db2:...], [order to check db backends for dbm. Valid value is a colon separated string with the backend names `ndbm', `gdbm' and `bdb'.]),
+-[
+-if test x$with_dbmliborder = xyes
+-then
+-AC_MSG_ERROR([proper usage is --with-dbmliborder=db1:db2:...])
+-else
+- for db in `echo $with_dbmliborder | sed 's/:/ /g'`; do
+- if test x$db != xndbm && test x$db != xgdbm && test x$db != xbdb
+- then
+- AC_MSG_ERROR([proper usage is --with-dbmliborder=db1:db2:...])
+- fi
+- done
+-fi])
+-AC_MSG_RESULT($with_dbmliborder)
+-
+-# Determine if signalmodule should be used.
+-AC_SUBST(USE_SIGNAL_MODULE)
+-AC_SUBST(SIGNAL_OBJS)
+-AC_MSG_CHECKING(for --with-signal-module)
+-AC_ARG_WITH(signal-module,
+- AS_HELP_STRING([--with-signal-module], [disable/enable signal module]))
+-
+-if test -z "$with_signal_module"
+-then with_signal_module="yes"
+-fi
+-AC_MSG_RESULT($with_signal_module)
+-
+-if test "${with_signal_module}" = "yes"; then
+- USE_SIGNAL_MODULE=""
+- SIGNAL_OBJS=""
+-else
+- USE_SIGNAL_MODULE="#"
+- SIGNAL_OBJS="Parser/intrcheck.o Python/sigcheck.o"
+-fi
+-
+-# This is used to generate Setup.config
+-AC_SUBST(USE_THREAD_MODULE)
+-USE_THREAD_MODULE=""
+-
+-AC_MSG_CHECKING(for --with-dec-threads)
+-AC_SUBST(LDLAST)
+-AC_ARG_WITH(dec-threads,
+- AS_HELP_STRING([--with-dec-threads], [use DEC Alpha/OSF1 thread-safe libraries]),
+-[
+-AC_MSG_RESULT($withval)
+-LDLAST=-threads
+-if test "${with_thread+set}" != set; then
+- with_thread="$withval";
+-fi],
+-[AC_MSG_RESULT(no)])
+-
+-# Templates for things AC_DEFINEd more than once.
+-# For a single AC_DEFINE, no template is needed.
+-AH_TEMPLATE(C_THREADS,[Define if you have the Mach cthreads package])
+-AH_TEMPLATE(_REENTRANT,
+- [Define to force use of thread-safe errno, h_errno, and other functions])
+-AH_TEMPLATE(WITH_THREAD,
+- [Define if you want to compile in rudimentary thread support])
+-
+-AC_MSG_CHECKING(for --with-threads)
+-dnl quadrigraphs "@<:@" and "@:>@" produce "[" and "]" in the output
+-AC_ARG_WITH(threads,
+- AS_HELP_STRING([--with(out)-threads@<:@=DIRECTORY@:>@], [disable/enable thread support]))
+-
+-# --with-thread is deprecated, but check for it anyway
+-dnl quadrigraphs "@<:@" and "@:>@" produce "[" and "]" in the output
+-AC_ARG_WITH(thread,
+- AS_HELP_STRING([--with(out)-thread@<:@=DIRECTORY@:>@], [deprecated; use --with(out)-threads]),
+- [with_threads=$with_thread])
+-
+-if test -z "$with_threads"
+-then with_threads="yes"
+-fi
+-AC_MSG_RESULT($with_threads)
+-
+-AC_SUBST(THREADOBJ)
+-if test "$with_threads" = "no"
+-then
+- USE_THREAD_MODULE="#"
+-elif test "$ac_cv_pthread_is_default" = yes
+-then
+- AC_DEFINE(WITH_THREAD)
+- # Defining _REENTRANT on system with POSIX threads should not hurt.
+- AC_DEFINE(_REENTRANT)
+- posix_threads=yes
+- THREADOBJ="Python/thread.o"
+-elif test "$ac_cv_kpthread" = "yes"
+-then
+- CC="$CC -Kpthread"
+- if test "$ac_cv_cxx_thread" = "yes"; then
+- CXX="$CXX -Kpthread"
+- fi
+- AC_DEFINE(WITH_THREAD)
+- posix_threads=yes
+- THREADOBJ="Python/thread.o"
+-elif test "$ac_cv_kthread" = "yes"
+-then
+- CC="$CC -Kthread"
+- if test "$ac_cv_cxx_thread" = "yes"; then
+- CXX="$CXX -Kthread"
+- fi
+- AC_DEFINE(WITH_THREAD)
+- posix_threads=yes
+- THREADOBJ="Python/thread.o"
+-elif test "$ac_cv_pthread" = "yes"
+-then
+- CC="$CC -pthread"
+- if test "$ac_cv_cxx_thread" = "yes"; then
+- CXX="$CXX -pthread"
+- fi
+- AC_DEFINE(WITH_THREAD)
+- posix_threads=yes
+- THREADOBJ="Python/thread.o"
+-else
+- if test ! -z "$with_threads" -a -d "$with_threads"
+- then LDFLAGS="$LDFLAGS -L$with_threads"
+- fi
+- if test ! -z "$withval" -a -d "$withval"
+- then LDFLAGS="$LDFLAGS -L$withval"
+- fi
+-
+- # According to the POSIX spec, a pthreads implementation must
+- # define _POSIX_THREADS in unistd.h. Some apparently don't
+- # (e.g. gnu pth with pthread emulation)
+- AC_MSG_CHECKING(for _POSIX_THREADS in unistd.h)
+- AC_EGREP_CPP(yes,
+- [
+-#include <unistd.h>
+-#ifdef _POSIX_THREADS
+-yes
+-#endif
+- ], unistd_defines_pthreads=yes, unistd_defines_pthreads=no)
+- AC_MSG_RESULT($unistd_defines_pthreads)
+-
+- AC_DEFINE(_REENTRANT)
+- AC_CHECK_HEADER(cthreads.h, [AC_DEFINE(WITH_THREAD)
+- AC_DEFINE(C_THREADS)
+- AC_DEFINE(HURD_C_THREADS, 1,
+- [Define if you are using Mach cthreads directly under /include])
+- LIBS="$LIBS -lthreads"
+- THREADOBJ="Python/thread.o"],[
+- AC_CHECK_HEADER(mach/cthreads.h, [AC_DEFINE(WITH_THREAD)
+- AC_DEFINE(C_THREADS)
+- AC_DEFINE(MACH_C_THREADS, 1,
+- [Define if you are using Mach cthreads under mach /])
+- THREADOBJ="Python/thread.o"],[
+- AC_MSG_CHECKING(for --with-pth)
+- AC_ARG_WITH([pth],
+- AS_HELP_STRING([--with-pth], [use GNU pth threading libraries]),
+- [AC_MSG_RESULT($withval)
+- AC_DEFINE([WITH_THREAD])
+- AC_DEFINE([HAVE_PTH], 1,
+- [Define if you have GNU PTH threads.])
+- LIBS="-lpth $LIBS"
+- THREADOBJ="Python/thread.o"],
+- [AC_MSG_RESULT(no)
+-
+- # Just looking for pthread_create in libpthread is not enough:
+- # on HP/UX, pthread.h renames pthread_create to a different symbol name.
+- # So we really have to include pthread.h, and then link.
+- _libs=$LIBS
+- LIBS="$LIBS -lpthread"
+- AC_MSG_CHECKING([for pthread_create in -lpthread])
+- AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <pthread.h>
+-
+-void * start_routine (void *arg) { exit (0); }]], [[
+-pthread_create (NULL, NULL, start_routine, NULL)]])],[
+- AC_MSG_RESULT(yes)
+- AC_DEFINE(WITH_THREAD)
+- posix_threads=yes
+- THREADOBJ="Python/thread.o"],[
+- LIBS=$_libs
+- AC_CHECK_FUNC(pthread_detach, [AC_DEFINE(WITH_THREAD)
+- posix_threads=yes
+- THREADOBJ="Python/thread.o"],[
+- AC_CHECK_HEADER(atheos/threads.h, [AC_DEFINE(WITH_THREAD)
+- AC_DEFINE(ATHEOS_THREADS, 1,
+- [Define this if you have AtheOS threads.])
+- THREADOBJ="Python/thread.o"],[
+- AC_CHECK_HEADER(kernel/OS.h, [AC_DEFINE(WITH_THREAD)
+- AC_DEFINE(BEOS_THREADS, 1,
+- [Define this if you have BeOS threads.])
+- THREADOBJ="Python/thread.o"],[
+- AC_CHECK_LIB(pthreads, pthread_create, [AC_DEFINE(WITH_THREAD)
+- posix_threads=yes
+- LIBS="$LIBS -lpthreads"
+- THREADOBJ="Python/thread.o"], [
+- AC_CHECK_LIB(c_r, pthread_create, [AC_DEFINE(WITH_THREAD)
+- posix_threads=yes
+- LIBS="$LIBS -lc_r"
+- THREADOBJ="Python/thread.o"], [
+- AC_CHECK_LIB(pthread, __pthread_create_system, [AC_DEFINE(WITH_THREAD)
+- posix_threads=yes
+- LIBS="$LIBS -lpthread"
+- THREADOBJ="Python/thread.o"], [
+- AC_CHECK_LIB(cma, pthread_create, [AC_DEFINE(WITH_THREAD)
+- posix_threads=yes
+- LIBS="$LIBS -lcma"
+- THREADOBJ="Python/thread.o"],[
+- USE_THREAD_MODULE="#"])
+- ])])])])])])])])])])
+-
+- AC_CHECK_LIB(mpc, usconfig, [AC_DEFINE(WITH_THREAD)
+- LIBS="$LIBS -lmpc"
+- THREADOBJ="Python/thread.o"
+- USE_THREAD_MODULE=""])
+-
+- if test "$posix_threads" != "yes"; then
+- AC_CHECK_LIB(thread, thr_create, [AC_DEFINE(WITH_THREAD)
+- LIBS="$LIBS -lthread"
+- THREADOBJ="Python/thread.o"
+- USE_THREAD_MODULE=""])
+- fi
+-
+- if test "$USE_THREAD_MODULE" != "#"
+- then
+- # If the above checks didn't disable threads, (at least) OSF1
+- # needs this '-threads' argument during linking.
+- case $ac_sys_system in
+- OSF1) LDLAST=-threads;;
+- esac
+- fi
+-fi
+-
+-if test "$posix_threads" = "yes"; then
+- if test "$unistd_defines_pthreads" = "no"; then
+- AC_DEFINE(_POSIX_THREADS, 1,
+- [Define if you have POSIX threads,
+- and your system does not define that.])
+- fi
+-
+- # Bug 662787: Using semaphores causes inexplicable hangs on Solaris 8.
+- case $ac_sys_system/$ac_sys_release in
+- SunOS/5.6) AC_DEFINE(HAVE_PTHREAD_DESTRUCTOR, 1,
+- [Defined for Solaris 2.6 bug in pthread header.])
+- ;;
+- SunOS/5.8) AC_DEFINE(HAVE_BROKEN_POSIX_SEMAPHORES, 1,
+- [Define if the Posix semaphores do not work on your system])
+- ;;
+- AIX/*) AC_DEFINE(HAVE_BROKEN_POSIX_SEMAPHORES, 1,
+- [Define if the Posix semaphores do not work on your system])
+- ;;
+- esac
+-
+- AC_MSG_CHECKING(if PTHREAD_SCOPE_SYSTEM is supported)
+- AC_CACHE_VAL(ac_cv_pthread_system_supported,
+- [AC_RUN_IFELSE([AC_LANG_SOURCE([[#include <pthread.h>
+- void *foo(void *parm) {
+- return NULL;
+- }
+- main() {
+- pthread_attr_t attr;
+- pthread_t id;
+- if (pthread_attr_init(&attr)) exit(-1);
+- if (pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM)) exit(-1);
+- if (pthread_create(&id, &attr, foo, NULL)) exit(-1);
+- exit(0);
+- }]])],
+- [ac_cv_pthread_system_supported=yes],
+- [ac_cv_pthread_system_supported=no],
+- [ac_cv_pthread_system_supported=no])
+- ])
+- AC_MSG_RESULT($ac_cv_pthread_system_supported)
+- if test "$ac_cv_pthread_system_supported" = "yes"; then
+- AC_DEFINE(PTHREAD_SYSTEM_SCHED_SUPPORTED, 1, [Defined if PTHREAD_SCOPE_SYSTEM supported.])
+- fi
+- AC_CHECK_FUNCS(pthread_sigmask,
+- [case $ac_sys_system in
+- CYGWIN*)
+- AC_DEFINE(HAVE_BROKEN_PTHREAD_SIGMASK, 1,
+- [Define if pthread_sigmask() does not work on your system.])
+- ;;
+- esac])
+-fi
+-
+-
+-# Check for enable-ipv6
+-AH_TEMPLATE(ENABLE_IPV6, [Define if --enable-ipv6 is specified])
+-AC_MSG_CHECKING([if --enable-ipv6 is specified])
+-AC_ARG_ENABLE(ipv6,
+-[ --enable-ipv6 Enable ipv6 (with ipv4) support
+- --disable-ipv6 Disable ipv6 support],
+-[ case "$enableval" in
+- no)
+- AC_MSG_RESULT(no)
+- ipv6=no
+- ;;
+- *) AC_MSG_RESULT(yes)
+- AC_DEFINE(ENABLE_IPV6)
+- ipv6=yes
+- ;;
+- esac ],
+-
+-[
+-dnl the check does not work on cross compilation case...
+- AC_RUN_IFELSE([AC_LANG_SOURCE([[ /* AF_INET6 available check */
+-#include <sys/types.h>
+-#include <sys/socket.h>
+-main()
+-{
+- if (socket(AF_INET6, SOCK_STREAM, 0) < 0)
+- exit(1);
+- else
+- exit(0);
+-}
+-]])],[
+- AC_MSG_RESULT(yes)
+- ipv6=yes
+-],[
+- AC_MSG_RESULT(no)
+- ipv6=no
+-],[
+- AC_MSG_RESULT(no)
+- ipv6=no
+-])
+-
+-if test "$ipv6" = "yes"; then
+- AC_MSG_CHECKING(if RFC2553 API is available)
+- AC_COMPILE_IFELSE([
+- AC_LANG_PROGRAM([[#include <sys/types.h>
+-#include <netinet/in.h>]],
+- [[struct sockaddr_in6 x;
+- x.sin6_scope_id;]])
+- ],[
+- AC_MSG_RESULT(yes)
+- ipv6=yes
+- ],[
+- AC_MSG_RESULT(no, IPv6 disabled)
+- ipv6=no
+- ])
+-fi
+-
+-if test "$ipv6" = "yes"; then
+- AC_DEFINE(ENABLE_IPV6)
+-fi
+-])
+-
+-ipv6type=unknown
+-ipv6lib=none
+-ipv6trylibc=no
+-
+-if test "$ipv6" = "yes"; then
+- AC_MSG_CHECKING([ipv6 stack type])
+- for i in inria kame linux-glibc linux-inet6 solaris toshiba v6d zeta;
+- do
+- case $i in
+- inria)
+- dnl http://www.kame.net/
+- AC_EGREP_CPP(yes, [
+-#include <netinet/in.h>
+-#ifdef IPV6_INRIA_VERSION
+-yes
+-#endif],
+- [ipv6type=$i])
+- ;;
+- kame)
+- dnl http://www.kame.net/
+- AC_EGREP_CPP(yes, [
+-#include <netinet/in.h>
+-#ifdef __KAME__
+-yes
+-#endif],
+- [ipv6type=$i;
+- ipv6lib=inet6
+- ipv6libdir=/usr/local/v6/lib
+- ipv6trylibc=yes])
+- ;;
+- linux-glibc)
+- dnl http://www.v6.linux.or.jp/
+- AC_EGREP_CPP(yes, [
+-#include <features.h>
+-#if defined(__GLIBC__) && ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 1) || (__GLIBC__ > 2))
+-yes
+-#endif],
+- [ipv6type=$i;
+- ipv6trylibc=yes])
+- ;;
+- linux-inet6)
+- dnl http://www.v6.linux.or.jp/
+- if test -d /usr/inet6; then
+- ipv6type=$i
+- ipv6lib=inet6
+- ipv6libdir=/usr/inet6/lib
+- BASECFLAGS="-I/usr/inet6/include $BASECFLAGS"
+- fi
+- ;;
+- solaris)
+- if test -f /etc/netconfig; then
+- if $GREP -q tcp6 /etc/netconfig; then
+- ipv6type=$i
+- ipv6trylibc=yes
+- fi
+- fi
+- ;;
+- toshiba)
+- AC_EGREP_CPP(yes, [
+-#include <sys/param.h>
+-#ifdef _TOSHIBA_INET6
+-yes
+-#endif],
+- [ipv6type=$i;
+- ipv6lib=inet6;
+- ipv6libdir=/usr/local/v6/lib])
+- ;;
+- v6d)
+- AC_EGREP_CPP(yes, [
+-#include </usr/local/v6/include/sys/v6config.h>
+-#ifdef __V6D__
+-yes
+-#endif],
+- [ipv6type=$i;
+- ipv6lib=v6;
+- ipv6libdir=/usr/local/v6/lib;
+- BASECFLAGS="-I/usr/local/v6/include $BASECFLAGS"])
+- ;;
+- zeta)
+- AC_EGREP_CPP(yes, [
+-#include <sys/param.h>
+-#ifdef _ZETA_MINAMI_INET6
+-yes
+-#endif],
+- [ipv6type=$i;
+- ipv6lib=inet6;
+- ipv6libdir=/usr/local/v6/lib])
+- ;;
+- esac
+- if test "$ipv6type" != "unknown"; then
+- break
+- fi
+- done
+- AC_MSG_RESULT($ipv6type)
+-fi
+-
+-if test "$ipv6" = "yes" -a "$ipv6lib" != "none"; then
+- if test -d $ipv6libdir -a -f $ipv6libdir/lib$ipv6lib.a; then
+- LIBS="-L$ipv6libdir -l$ipv6lib $LIBS"
+- echo "using lib$ipv6lib"
+- else
+- if test $ipv6trylibc = "yes"; then
+- echo "using libc"
+- else
+- echo 'Fatal: no $ipv6lib library found. cannot continue.'
+- echo "You need to fetch lib$ipv6lib.a from appropriate"
+- echo 'ipv6 kit and compile beforehand.'
+- exit 1
+- fi
+- fi
+-fi
+-
+-AC_MSG_CHECKING(for OSX 10.5 SDK or later)
+-AC_COMPILE_IFELSE([
+- AC_LANG_PROGRAM([[#include <Carbon/Carbon.h>]], [[FSIORefNum fRef = 0]])
+-],[
+- AC_DEFINE(HAVE_OSX105_SDK, 1, [Define if compiling using MacOS X 10.5 SDK or later.])
+- AC_MSG_RESULT(yes)
+-],[
+- AC_MSG_RESULT(no)
+-])
+-
+-# Check for --with-doc-strings
+-AC_MSG_CHECKING(for --with-doc-strings)
+-AC_ARG_WITH(doc-strings,
+- AS_HELP_STRING([--with(out)-doc-strings], [disable/enable documentation strings]))
+-
+-if test -z "$with_doc_strings"
+-then with_doc_strings="yes"
+-fi
+-if test "$with_doc_strings" != "no"
+-then
+- AC_DEFINE(WITH_DOC_STRINGS, 1,
+- [Define if you want documentation strings in extension modules])
+-fi
+-AC_MSG_RESULT($with_doc_strings)
+-
+-# Check for Python-specific malloc support
+-AC_MSG_CHECKING(for --with-tsc)
+-AC_ARG_WITH(tsc,
+- AS_HELP_STRING([--with(out)-tsc],[enable/disable timestamp counter profile]),[
+-if test "$withval" != no
+-then
+- AC_DEFINE(WITH_TSC, 1,
+- [Define to profile with the Pentium timestamp counter])
+- AC_MSG_RESULT(yes)
+-else AC_MSG_RESULT(no)
+-fi],
+-[AC_MSG_RESULT(no)])
+-
+-# Check for Python-specific malloc support
+-AC_MSG_CHECKING(for --with-pymalloc)
+-AC_ARG_WITH(pymalloc,
+- AS_HELP_STRING([--with(out)-pymalloc], [disable/enable specialized mallocs]))
+-
+-if test -z "$with_pymalloc"
+-then with_pymalloc="yes"
+-fi
+-if test "$with_pymalloc" != "no"
+-then
+- AC_DEFINE(WITH_PYMALLOC, 1,
+- [Define if you want to compile in Python-specific mallocs])
+-fi
+-AC_MSG_RESULT($with_pymalloc)
+-
+-# Check for Valgrind support
+-AC_MSG_CHECKING([for --with-valgrind])
+-AC_ARG_WITH([valgrind],
+- AS_HELP_STRING([--with-valgrind], [Enable Valgrind support]),,
+- with_valgrind=no)
+-AC_MSG_RESULT([$with_valgrind])
+-if test "$with_valgrind" != no; then
+- AC_CHECK_HEADER([valgrind/valgrind.h],
+- [AC_DEFINE([WITH_VALGRIND], 1, [Define if you want pymalloc to be disabled when running under valgrind])],
+- [AC_MSG_ERROR([Valgrind support requested but headers not available])]
+- )
+-fi
+-
+-# Check for --with-wctype-functions
+-AC_MSG_CHECKING(for --with-wctype-functions)
+-AC_ARG_WITH(wctype-functions,
+- AS_HELP_STRING([--with-wctype-functions], [use wctype.h functions]),
+-[
+-if test "$withval" != no
+-then
+- AC_DEFINE(WANT_WCTYPE_FUNCTIONS, 1,
+- [Define if you want wctype.h functions to be used instead of the
+- one supplied by Python itself. (see Include/unicodectype.h).])
+- AC_MSG_RESULT(yes)
+-else AC_MSG_RESULT(no)
+-fi],
+-[AC_MSG_RESULT(no)])
+-
+-# -I${DLINCLDIR} is added to the compile rule for importdl.o
+-AC_SUBST(DLINCLDIR)
+-DLINCLDIR=.
+-
+-# the dlopen() function means we might want to use dynload_shlib.o. some
+-# platforms, such as AIX, have dlopen(), but don't want to use it.
+-AC_CHECK_FUNCS(dlopen)
+-
+-# DYNLOADFILE specifies which dynload_*.o file we will use for dynamic
+-# loading of modules.
+-AC_SUBST(DYNLOADFILE)
+-AC_MSG_CHECKING(DYNLOADFILE)
+-if test -z "$DYNLOADFILE"
+-then
+- case $ac_sys_system/$ac_sys_release in
+- AIX*) # Use dynload_shlib.c and dlopen() if we have it; otherwise dynload_aix.c
+- if test "$ac_cv_func_dlopen" = yes
+- then DYNLOADFILE="dynload_shlib.o"
+- else DYNLOADFILE="dynload_aix.o"
+- fi
+- ;;
+- BeOS*) DYNLOADFILE="dynload_beos.o";;
+- hp*|HP*) DYNLOADFILE="dynload_hpux.o";;
+- # Use dynload_next.c only on 10.2 and below, which don't have native dlopen()
+- Darwin/@<:@0156@:>@\..*) DYNLOADFILE="dynload_next.o";;
+- atheos*) DYNLOADFILE="dynload_atheos.o";;
+- *)
+- # use dynload_shlib.c and dlopen() if we have it; otherwise stub
+- # out any dynamic loading
+- if test "$ac_cv_func_dlopen" = yes
+- then DYNLOADFILE="dynload_shlib.o"
+- else DYNLOADFILE="dynload_stub.o"
+- fi
+- ;;
+- esac
+-fi
+-AC_MSG_RESULT($DYNLOADFILE)
+-if test "$DYNLOADFILE" != "dynload_stub.o"
+-then
+- AC_DEFINE(HAVE_DYNAMIC_LOADING, 1,
+- [Defined when any dynamic module loading is enabled.])
+-fi
+-
+-# MACHDEP_OBJS can be set to platform-specific object files needed by Python
+-
+-AC_SUBST(MACHDEP_OBJS)
+-AC_MSG_CHECKING(MACHDEP_OBJS)
+-if test -z "$MACHDEP_OBJS"
+-then
+- MACHDEP_OBJS=$extra_machdep_objs
+-else
+- MACHDEP_OBJS="$MACHDEP_OBJS $extra_machdep_objs"
+-fi
+-AC_MSG_RESULT(MACHDEP_OBJS)
+-
+-# checks for library functions
+-AC_CHECK_FUNCS(alarm setitimer getitimer bind_textdomain_codeset chown \
+- clock confstr ctermid execv fchmod fchown fork fpathconf ftime ftruncate \
+- gai_strerror getgroups getlogin getloadavg getpeername getpgid getpid \
+- getpriority getresuid getresgid getpwent getspnam getspent getsid getwd \
+- initgroups kill killpg lchmod lchown lstat mkfifo mknod mktime \
+- mremap nice pathconf pause plock poll pthread_init \
+- putenv readlink realpath \
+- select sem_open sem_timedwait sem_getvalue sem_unlink setegid seteuid \
+- setgid \
+- setlocale setregid setreuid setsid setpgid setpgrp setuid setvbuf snprintf \
+- setlocale setregid setreuid setresuid setresgid \
+- setsid setpgid setpgrp setuid setvbuf snprintf \
+- sigaction siginterrupt sigrelse strftime \
+- sysconf tcgetpgrp tcsetpgrp tempnam timegm times tmpfile tmpnam tmpnam_r \
+- truncate uname unsetenv utimes waitpid wait3 wait4 wcscoll _getpty)
+-
+-# For some functions, having a definition is not sufficient, since
+-# we want to take their address.
+-AC_MSG_CHECKING(for chroot)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[void *x=chroot]])],
+- [AC_DEFINE(HAVE_CHROOT, 1, Define if you have the 'chroot' function.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)
+-])
+-AC_MSG_CHECKING(for link)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[void *x=link]])],
+- [AC_DEFINE(HAVE_LINK, 1, Define if you have the 'link' function.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)
+-])
+-AC_MSG_CHECKING(for symlink)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[void *x=symlink]])],
+- [AC_DEFINE(HAVE_SYMLINK, 1, Define if you have the 'symlink' function.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)
+-])
+-AC_MSG_CHECKING(for fchdir)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[void *x=fchdir]])],
+- [AC_DEFINE(HAVE_FCHDIR, 1, Define if you have the 'fchdir' function.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)
+-])
+-AC_MSG_CHECKING(for fsync)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[void *x=fsync]])],
+- [AC_DEFINE(HAVE_FSYNC, 1, Define if you have the 'fsync' function.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)
+-])
+-AC_MSG_CHECKING(for fdatasync)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[void *x=fdatasync]])],
+- [AC_DEFINE(HAVE_FDATASYNC, 1, Define if you have the 'fdatasync' function.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)
+-])
+-AC_MSG_CHECKING(for epoll)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/epoll.h>]], [[void *x=epoll_create]])],
+- [AC_DEFINE(HAVE_EPOLL, 1, Define if you have the 'epoll' functions.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)
+-])
+-AC_MSG_CHECKING(for kqueue)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-#include <sys/types.h>
+-#include <sys/event.h>
+- ]], [[int x=kqueue()]])],
+- [AC_DEFINE(HAVE_KQUEUE, 1, Define if you have the 'kqueue' functions.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)
+-])
+-# On some systems (eg. FreeBSD 5), we would find a definition of the
+-# functions ctermid_r, setgroups in the library, but no prototype
+-# (e.g. because we use _XOPEN_SOURCE). See whether we can take their
+-# address to avoid compiler warnings and potential miscompilations
+-# because of the missing prototypes.
+-
+-AC_MSG_CHECKING(for ctermid_r)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-#include <stdio.h>
+-]], [[void* p = ctermid_r]])],
+- [AC_DEFINE(HAVE_CTERMID_R, 1, Define if you have the 'ctermid_r' function.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)
+-])
+-
+-AC_CACHE_CHECK([for flock declaration], [ac_cv_flock_decl],
+- [AC_COMPILE_IFELSE(
+- [AC_LANG_PROGRAM(
+- [#include <sys/file.h>],
+- [void* p = flock]
+- )],
+- [ac_cv_flock_decl=yes],
+- [ac_cv_flock_decl=no]
+- )
+-])
+-if test "x${ac_cv_flock_decl}" = xyes; then
+- AC_CHECK_FUNCS(flock,,
+- AC_CHECK_LIB(bsd,flock,
+- [AC_DEFINE(HAVE_FLOCK)
+- AC_DEFINE(FLOCK_NEEDS_LIBBSD, 1, Define if flock needs to be linked with bsd library.)
+- ])
+- )
+-fi
+-
+-AC_MSG_CHECKING(for getpagesize)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-#include <unistd.h>
+-]], [[void* p = getpagesize]])],
+- [AC_DEFINE(HAVE_GETPAGESIZE, 1, Define if you have the 'getpagesize' function.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)
+-])
+-
+-AC_MSG_CHECKING(for broken unsetenv)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-#include <stdlib.h>
+-]], [[int res = unsetenv("DUMMY")]])],
+- [AC_MSG_RESULT(no)],
+- [AC_DEFINE(HAVE_BROKEN_UNSETENV, 1, Define if `unsetenv` does not return an int.)
+- AC_MSG_RESULT(yes)
+-])
+-
+-dnl check for true
+-AC_CHECK_PROGS(TRUE, true, /bin/true)
+-
+-dnl On some systems (e.g. Solaris 9), hstrerror and inet_aton are in -lresolv
+-dnl On others, they are in the C library, so we need to take no action
+-AC_CHECK_LIB(c, inet_aton, [$ac_cv_prog_TRUE],
+- AC_CHECK_LIB(resolv, inet_aton)
+-)
+-
+-# On Tru64, chflags seems to be present, but calling it will
+-# exit Python
+-AC_CACHE_CHECK([for chflags], [ac_cv_have_chflags], [dnl
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <sys/stat.h>
+-#include <unistd.h>
+-int main(int argc, char*argv[])
+-{
+- if(chflags(argv[0], 0) != 0)
+- return 1;
+- return 0;
+-}
+-]])],
+-[ac_cv_have_chflags=yes],
+-[ac_cv_have_chflags=no],
+-[ac_cv_have_chflags=cross])
+-])
+-if test "$ac_cv_have_chflags" = cross ; then
+- AC_CHECK_FUNC([chflags], [ac_cv_have_chflags="yes"], [ac_cv_have_chflags="no"])
+-fi
+-if test "$ac_cv_have_chflags" = yes ; then
+- AC_DEFINE(HAVE_CHFLAGS, 1, [Define to 1 if you have the 'chflags' function.])
+-fi
+-
+-AC_CACHE_CHECK([for lchflags], [ac_cv_have_lchflags], [dnl
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <sys/stat.h>
+-#include <unistd.h>
+-int main(int argc, char*argv[])
+-{
+- if(lchflags(argv[0], 0) != 0)
+- return 1;
+- return 0;
+-}
+-]])],[ac_cv_have_lchflags=yes],[ac_cv_have_lchflags=no],[ac_cv_have_lchflags=cross])
+-])
+-if test "$ac_cv_have_lchflags" = cross ; then
+- AC_CHECK_FUNC([lchflags], [ac_cv_have_lchflags="yes"], [ac_cv_have_lchflags="no"])
+-fi
+-if test "$ac_cv_have_lchflags" = yes ; then
+- AC_DEFINE(HAVE_LCHFLAGS, 1, [Define to 1 if you have the 'lchflags' function.])
+-fi
+-
+-dnl Check if system zlib has *Copy() functions
+-dnl
+-dnl On MacOSX the linker will search for dylibs on the entire linker path
+-dnl before searching for static libraries. setup.py adds -Wl,-search_paths_first
+-dnl to revert to a more traditional unix behaviour and make it possible to
+-dnl override the system libz with a local static library of libz. Temporarily
+-dnl add that flag to our CFLAGS as well to ensure that we check the version
+-dnl of libz that will be used by setup.py.
+-dnl The -L/usr/local/lib is needed as well to get the same compilation
+-dnl environment as setup.py (and leaving it out can cause configure to use the
+-dnl wrong version of the library)
+-case $ac_sys_system/$ac_sys_release in
+-Darwin/*)
+- _CUR_CFLAGS="${CFLAGS}"
+- _CUR_LDFLAGS="${LDFLAGS}"
+- CFLAGS="${CFLAGS} -Wl,-search_paths_first"
+- LDFLAGS="${LDFLAGS} -Wl,-search_paths_first -L/usr/local/lib"
+- ;;
+-esac
+-
+-AC_CHECK_LIB(z, inflateCopy, AC_DEFINE(HAVE_ZLIB_COPY, 1, [Define if the zlib library has inflateCopy]))
+-
+-case $ac_sys_system/$ac_sys_release in
+-Darwin/*)
+- CFLAGS="${_CUR_CFLAGS}"
+- LDFLAGS="${_CUR_LDFLAGS}"
+- ;;
+-esac
+-
+-AC_MSG_CHECKING(for hstrerror)
+-AC_LINK_IFELSE([AC_LANG_PROGRAM([[
+-#include <netdb.h>
+-]], [[void* p = hstrerror; hstrerror(0)]])],
+- [AC_DEFINE(HAVE_HSTRERROR, 1, Define if you have the 'hstrerror' function.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)
+-])
+-
+-AC_MSG_CHECKING(for inet_aton)
+-AC_LINK_IFELSE([AC_LANG_PROGRAM([[
+-#include <sys/types.h>
+-#include <sys/socket.h>
+-#include <netinet/in.h>
+-#include <arpa/inet.h>
+-]], [[void* p = inet_aton;inet_aton(0,0)]])],
+- [AC_DEFINE(HAVE_INET_ATON, 1, Define if you have the 'inet_aton' function.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)
+-])
+-
+-AC_MSG_CHECKING(for inet_pton)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-#include <sys/types.h>
+-#include <sys/socket.h>
+-#include <netinet/in.h>
+-#include <arpa/inet.h>
+-]], [[void* p = inet_pton]])],
+- [AC_DEFINE(HAVE_INET_PTON, 1, Define if you have the 'inet_pton' function.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)
+-])
+-
+-# On some systems, setgroups is in unistd.h, on others, in grp.h
+-AC_MSG_CHECKING(for setgroups)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-#include <unistd.h>
+-#ifdef HAVE_GRP_H
+-#include <grp.h>
+-#endif
+-]], [[void* p = setgroups]])],
+- [AC_DEFINE(HAVE_SETGROUPS, 1, Define if you have the 'setgroups' function.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)
+-])
+-
+-# check for openpty and forkpty
+-
+-AC_CHECK_FUNCS(openpty,,
+- AC_CHECK_LIB(util,openpty,
+- [AC_DEFINE(HAVE_OPENPTY) LIBS="$LIBS -lutil"],
+- AC_CHECK_LIB(bsd,openpty, [AC_DEFINE(HAVE_OPENPTY) LIBS="$LIBS -lbsd"])
+- )
+-)
+-AC_CHECK_FUNCS(forkpty,,
+- AC_CHECK_LIB(util,forkpty,
+- [AC_DEFINE(HAVE_FORKPTY) LIBS="$LIBS -lutil"],
+- AC_CHECK_LIB(bsd,forkpty, [AC_DEFINE(HAVE_FORKPTY) LIBS="$LIBS -lbsd"])
+- )
+-)
+-
+-# Stuff for expat.
+-AC_CHECK_FUNCS(memmove)
+-
+-# check for long file support functions
+-AC_CHECK_FUNCS(fseek64 fseeko fstatvfs ftell64 ftello statvfs)
+-
+-AC_REPLACE_FUNCS(dup2 getcwd strdup)
+-AC_CHECK_FUNCS(getpgrp,
+- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[getpgrp(0);]])],
+- [AC_DEFINE(GETPGRP_HAVE_ARG, 1, [Define if getpgrp() must be called as getpgrp(0).])],
+- [])
+-)
+-AC_CHECK_FUNCS(setpgrp,
+- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>]], [[setpgrp(0,0);]])],
+- [AC_DEFINE(SETPGRP_HAVE_ARG, 1, [Define if setpgrp() must be called as setpgrp(0, 0).])],
+- [])
+-)
+-AC_CHECK_FUNCS(gettimeofday,
+- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/time.h>]],
+- [[gettimeofday((struct timeval*)0,(struct timezone*)0);]])],
+- [],
+- [AC_DEFINE(GETTIMEOFDAY_NO_TZ, 1,
+- [Define if gettimeofday() does not have second (timezone) argument
+- This is the case on Motorola V4 (R40V4.2)])
+- ])
+-)
+-
+-AC_MSG_CHECKING(for major, minor, and makedev)
+-AC_LINK_IFELSE([AC_LANG_PROGRAM([[
+-#if defined(MAJOR_IN_MKDEV)
+-#include <sys/mkdev.h>
+-#elif defined(MAJOR_IN_SYSMACROS)
+-#include <sys/sysmacros.h>
+-#else
+-#include <sys/types.h>
+-#endif
+-]], [[
+- makedev(major(0),minor(0));
+-]])],[
+- AC_DEFINE(HAVE_DEVICE_MACROS, 1,
+- [Define to 1 if you have the device macros.])
+- AC_MSG_RESULT(yes)
+-],[
+- AC_MSG_RESULT(no)
+-])
+-
+-# On OSF/1 V5.1, getaddrinfo is available, but a define
+-# is needed for [no]getaddrinfo in netdb.h.
+-AC_MSG_CHECKING(for getaddrinfo)
+-AC_LINK_IFELSE([AC_LANG_PROGRAM([[
+-#include <sys/types.h>
+-#include <sys/socket.h>
+-#include <netdb.h>
+-#include <stdio.h>
+-]], [[getaddrinfo(NULL, NULL, NULL, NULL);]])],
+-[have_getaddrinfo=yes],
+-[have_getaddrinfo=no])
+-AC_MSG_RESULT($have_getaddrinfo)
+-if test $have_getaddrinfo = yes
+-then
+- AC_MSG_CHECKING(getaddrinfo bug)
+- AC_CACHE_VAL(ac_cv_buggy_getaddrinfo,
+- AC_RUN_IFELSE([AC_LANG_SOURCE([[[
+-#include <sys/types.h>
+-#include <netdb.h>
+-#include <string.h>
+-#include <sys/socket.h>
+-#include <netinet/in.h>
+-
+-int main()
+-{
+- int passive, gaierr, inet4 = 0, inet6 = 0;
+- struct addrinfo hints, *ai, *aitop;
+- char straddr[INET6_ADDRSTRLEN], strport[16];
+-
+- for (passive = 0; passive <= 1; passive++) {
+- memset(&hints, 0, sizeof(hints));
+- hints.ai_family = AF_UNSPEC;
+- hints.ai_flags = passive ? AI_PASSIVE : 0;
+- hints.ai_socktype = SOCK_STREAM;
+- hints.ai_protocol = IPPROTO_TCP;
+- if ((gaierr = getaddrinfo(NULL, "54321", &hints, &aitop)) != 0) {
+- (void)gai_strerror(gaierr);
+- goto bad;
+- }
+- for (ai = aitop; ai; ai = ai->ai_next) {
+- if (ai->ai_addr == NULL ||
+- ai->ai_addrlen == 0 ||
+- getnameinfo(ai->ai_addr, ai->ai_addrlen,
+- straddr, sizeof(straddr), strport, sizeof(strport),
+- NI_NUMERICHOST|NI_NUMERICSERV) != 0) {
+- goto bad;
+- }
+- switch (ai->ai_family) {
+- case AF_INET:
+- if (strcmp(strport, "54321") != 0) {
+- goto bad;
+- }
+- if (passive) {
+- if (strcmp(straddr, "0.0.0.0") != 0) {
+- goto bad;
+- }
+- } else {
+- if (strcmp(straddr, "127.0.0.1") != 0) {
+- goto bad;
+- }
+- }
+- inet4++;
+- break;
+- case AF_INET6:
+- if (strcmp(strport, "54321") != 0) {
+- goto bad;
+- }
+- if (passive) {
+- if (strcmp(straddr, "::") != 0) {
+- goto bad;
+- }
+- } else {
+- if (strcmp(straddr, "::1") != 0) {
+- goto bad;
+- }
+- }
+- inet6++;
+- break;
+- case AF_UNSPEC:
+- goto bad;
+- break;
+- default:
+- /* another family support? */
+- break;
+- }
+- }
+- }
+-
+- if (!(inet4 == 0 || inet4 == 2))
+- goto bad;
+- if (!(inet6 == 0 || inet6 == 2))
+- goto bad;
+-
+- if (aitop)
+- freeaddrinfo(aitop);
+- return 0;
+-
+- bad:
+- if (aitop)
+- freeaddrinfo(aitop);
+- return 1;
+-}
+-]]])],
+-[ac_cv_buggy_getaddrinfo=no],
+-[ac_cv_buggy_getaddrinfo=yes],
+-[ac_cv_buggy_getaddrinfo=yes]))
+-fi
+-
+-AC_MSG_RESULT($ac_cv_buggy_getaddrinfo)
+-
+-if test $have_getaddrinfo = no -o "$ac_cv_buggy_getaddrinfo" = yes
+-then
+- if test $ipv6 = yes
+- then
+- echo 'Fatal: You must get working getaddrinfo() function.'
+- echo ' or you can specify "--disable-ipv6"'.
+- exit 1
+- fi
+-else
+- AC_DEFINE(HAVE_GETADDRINFO, 1, [Define if you have the getaddrinfo function.])
+-fi
+-
+-AC_CHECK_FUNCS(getnameinfo)
+-
+-# checks for structures
+-AC_HEADER_TIME
+-AC_STRUCT_TM
+-AC_STRUCT_TIMEZONE
+-AC_CHECK_MEMBERS([struct stat.st_rdev])
+-AC_CHECK_MEMBERS([struct stat.st_blksize])
+-AC_CHECK_MEMBERS([struct stat.st_flags])
+-AC_CHECK_MEMBERS([struct stat.st_gen])
+-AC_CHECK_MEMBERS([struct stat.st_birthtime])
+-AC_STRUCT_ST_BLOCKS
+-
+-AC_MSG_CHECKING(for time.h that defines altzone)
+-AC_CACHE_VAL(ac_cv_header_time_altzone,[
+- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <time.h>]], [[return altzone;]])],
+- [ac_cv_header_time_altzone=yes],
+- [ac_cv_header_time_altzone=no])
+- ])
+-AC_MSG_RESULT($ac_cv_header_time_altzone)
+-if test $ac_cv_header_time_altzone = yes; then
+- AC_DEFINE(HAVE_ALTZONE, 1, [Define this if your time.h defines altzone.])
+-fi
+-
+-was_it_defined=no
+-AC_MSG_CHECKING(whether sys/select.h and sys/time.h may both be included)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-#include <sys/types.h>
+-#include <sys/select.h>
+-#include <sys/time.h>
+-]], [[;]])],[
+- AC_DEFINE(SYS_SELECT_WITH_SYS_TIME, 1,
+- [Define if you can safely include both <sys/select.h> and <sys/time.h>
+- (which you can't on SCO ODT 3.0).])
+- was_it_defined=yes
+-],[])
+-AC_MSG_RESULT($was_it_defined)
+-
+-AC_MSG_CHECKING(for addrinfo)
+-AC_CACHE_VAL(ac_cv_struct_addrinfo,
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <netdb.h>]], [[struct addrinfo a]])],
+- [ac_cv_struct_addrinfo=yes],
+- [ac_cv_struct_addrinfo=no]))
+-AC_MSG_RESULT($ac_cv_struct_addrinfo)
+-if test $ac_cv_struct_addrinfo = yes; then
+- AC_DEFINE(HAVE_ADDRINFO, 1, [struct addrinfo (netdb.h)])
+-fi
+-
+-AC_MSG_CHECKING(for sockaddr_storage)
+-AC_CACHE_VAL(ac_cv_struct_sockaddr_storage,
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-# include <sys/types.h>
+-# include <sys/socket.h>]], [[struct sockaddr_storage s]])],
+- [ac_cv_struct_sockaddr_storage=yes],
+- [ac_cv_struct_sockaddr_storage=no]))
+-AC_MSG_RESULT($ac_cv_struct_sockaddr_storage)
+-if test $ac_cv_struct_sockaddr_storage = yes; then
+- AC_DEFINE(HAVE_SOCKADDR_STORAGE, 1, [struct sockaddr_storage (sys/socket.h)])
+-fi
+-
+-# checks for compiler characteristics
+-
+-AC_C_CHAR_UNSIGNED
+-AC_C_CONST
+-
+-works=no
+-AC_MSG_CHECKING(for working volatile)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[volatile int x; x = 0;]])],
+- [works=yes],
+- [AC_DEFINE(volatile, , [Define to empty if the keyword does not work.])]
+-)
+-AC_MSG_RESULT($works)
+-
+-works=no
+-AC_MSG_CHECKING(for working signed char)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[signed char c;]])],
+- [works=yes],
+- [AC_DEFINE(signed, , [Define to empty if the keyword does not work.])]
+-)
+-AC_MSG_RESULT($works)
+-
+-have_prototypes=no
+-AC_MSG_CHECKING(for prototypes)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[int foo(int x) { return 0; }]], [[return foo(10);]])],
+- [AC_DEFINE(HAVE_PROTOTYPES, 1,
+- [Define if your compiler supports function prototype])
+- have_prototypes=yes],
+- []
+-)
+-AC_MSG_RESULT($have_prototypes)
+-
+-works=no
+-AC_MSG_CHECKING(for variable length prototypes and stdarg.h)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-#include <stdarg.h>
+-int foo(int x, ...) {
+- va_list va;
+- va_start(va, x);
+- va_arg(va, int);
+- va_arg(va, char *);
+- va_arg(va, double);
+- return 0;
+-}
+-]], [[return foo(10, "", 3.14);]])],[
+- AC_DEFINE(HAVE_STDARG_PROTOTYPES, 1,
+- [Define if your compiler supports variable length function prototypes
+- (e.g. void fprintf(FILE *, char *, ...);) *and* <stdarg.h>])
+- works=yes
+-],[])
+-AC_MSG_RESULT($works)
+-
+-# check for socketpair
+-AC_MSG_CHECKING(for socketpair)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-#include <sys/types.h>
+-#include <sys/socket.h>
+-]], [[void *x=socketpair]])],
+- [AC_DEFINE(HAVE_SOCKETPAIR, 1, [Define if you have the 'socketpair' function.])
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)]
+-)
+-
+-# check if sockaddr has sa_len member
+-AC_MSG_CHECKING(if sockaddr has sa_len member)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/types.h>
+-#include <sys/socket.h>]], [[struct sockaddr x;
+-x.sa_len = 0;]])],
+- [AC_MSG_RESULT(yes)
+- AC_DEFINE(HAVE_SOCKADDR_SA_LEN, 1, [Define if sockaddr has sa_len member])],
+- [AC_MSG_RESULT(no)]
+-)
+-
+-va_list_is_array=no
+-AC_MSG_CHECKING(whether va_list is an array)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-#ifdef HAVE_STDARG_PROTOTYPES
+-#include <stdarg.h>
+-#else
+-#include <varargs.h>
+-#endif
+-]], [[va_list list1, list2; list1 = list2;]])],[],[
+- AC_DEFINE(VA_LIST_IS_ARRAY, 1, [Define if a va_list is an array of some kind])
+- va_list_is_array=yes
+-])
+-AC_MSG_RESULT($va_list_is_array)
+-
+-# sigh -- gethostbyname_r is a mess; it can have 3, 5 or 6 arguments :-(
+-AH_TEMPLATE(HAVE_GETHOSTBYNAME_R,
+- [Define this if you have some version of gethostbyname_r()])
+-
+-AC_CHECK_FUNC(gethostbyname_r, [
+- AC_DEFINE(HAVE_GETHOSTBYNAME_R)
+- AC_MSG_CHECKING([gethostbyname_r with 6 args])
+- OLD_CFLAGS=$CFLAGS
+- CFLAGS="$CFLAGS $MY_CPPFLAGS $MY_THREAD_CPPFLAGS $MY_CFLAGS"
+- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-# include <netdb.h>
+- ]], [[
+- char *name;
+- struct hostent *he, *res;
+- char buffer[2048];
+- int buflen = 2048;
+- int h_errnop;
+-
+- (void) gethostbyname_r(name, he, buffer, buflen, &res, &h_errnop)
+- ]])],[
+- AC_DEFINE(HAVE_GETHOSTBYNAME_R)
+- AC_DEFINE(HAVE_GETHOSTBYNAME_R_6_ARG, 1,
+- [Define this if you have the 6-arg version of gethostbyname_r().])
+- AC_MSG_RESULT(yes)
+- ],[
+- AC_MSG_RESULT(no)
+- AC_MSG_CHECKING([gethostbyname_r with 5 args])
+- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-# include <netdb.h>
+- ]], [[
+- char *name;
+- struct hostent *he;
+- char buffer[2048];
+- int buflen = 2048;
+- int h_errnop;
+-
+- (void) gethostbyname_r(name, he, buffer, buflen, &h_errnop)
+- ]])],
+- [
+- AC_DEFINE(HAVE_GETHOSTBYNAME_R)
+- AC_DEFINE(HAVE_GETHOSTBYNAME_R_5_ARG, 1,
+- [Define this if you have the 5-arg version of gethostbyname_r().])
+- AC_MSG_RESULT(yes)
+- ], [
+- AC_MSG_RESULT(no)
+- AC_MSG_CHECKING([gethostbyname_r with 3 args])
+- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-# include <netdb.h>
+- ]], [[
+- char *name;
+- struct hostent *he;
+- struct hostent_data data;
+-
+- (void) gethostbyname_r(name, he, &data);
+- ]])],
+- [
+- AC_DEFINE(HAVE_GETHOSTBYNAME_R)
+- AC_DEFINE(HAVE_GETHOSTBYNAME_R_3_ARG, 1,
+- [Define this if you have the 3-arg version of gethostbyname_r().])
+- AC_MSG_RESULT(yes)
+- ], [
+- AC_MSG_RESULT(no)
+- ])
+- ])
+- ])
+- CFLAGS=$OLD_CFLAGS
+-], [
+- AC_CHECK_FUNCS(gethostbyname)
+-])
+-AC_SUBST(HAVE_GETHOSTBYNAME_R_6_ARG)
+-AC_SUBST(HAVE_GETHOSTBYNAME_R_5_ARG)
+-AC_SUBST(HAVE_GETHOSTBYNAME_R_3_ARG)
+-AC_SUBST(HAVE_GETHOSTBYNAME_R)
+-AC_SUBST(HAVE_GETHOSTBYNAME)
+-
+-# checks for system services
+-# (none yet)
+-
+-# Linux requires this for correct f.p. operations
+-AC_CHECK_FUNC(__fpu_control,
+- [],
+- [AC_CHECK_LIB(ieee, __fpu_control)
+-])
+-
+-# Check for --with-fpectl
+-AC_MSG_CHECKING(for --with-fpectl)
+-AC_ARG_WITH(fpectl,
+- AS_HELP_STRING([--with-fpectl], [enable SIGFPE catching]),
+-[
+-if test "$withval" != no
+-then
+- AC_DEFINE(WANT_SIGFPE_HANDLER, 1,
+- [Define if you want SIGFPE handled (see Include/pyfpe.h).])
+- AC_MSG_RESULT(yes)
+-else AC_MSG_RESULT(no)
+-fi],
+-[AC_MSG_RESULT(no)])
+-
+-# check for --with-libm=...
+-AC_SUBST(LIBM)
+-case $ac_sys_system in
+-Darwin) ;;
+-BeOS) ;;
+-*) LIBM=-lm
+-esac
+-AC_MSG_CHECKING(for --with-libm=STRING)
+-AC_ARG_WITH(libm,
+- AS_HELP_STRING([--with-libm=STRING], [math library]),
+-[
+-if test "$withval" = no
+-then LIBM=
+- AC_MSG_RESULT(force LIBM empty)
+-elif test "$withval" != yes
+-then LIBM=$withval
+- AC_MSG_RESULT(set LIBM="$withval")
+-else AC_MSG_ERROR([proper usage is --with-libm=STRING])
+-fi],
+-[AC_MSG_RESULT(default LIBM="$LIBM")])
+-
+-# check for --with-libc=...
+-AC_SUBST(LIBC)
+-AC_MSG_CHECKING(for --with-libc=STRING)
+-AC_ARG_WITH(libc,
+- AS_HELP_STRING([--with-libc=STRING], [C library]),
+-[
+-if test "$withval" = no
+-then LIBC=
+- AC_MSG_RESULT(force LIBC empty)
+-elif test "$withval" != yes
+-then LIBC=$withval
+- AC_MSG_RESULT(set LIBC="$withval")
+-else AC_MSG_ERROR([proper usage is --with-libc=STRING])
+-fi],
+-[AC_MSG_RESULT(default LIBC="$LIBC")])
+-
+-# **************************************************
+-# * Check for various properties of floating point *
+-# **************************************************
+-
+-AC_MSG_CHECKING(whether C doubles are little-endian IEEE 754 binary64)
+-AC_CACHE_VAL(ac_cv_little_endian_double, [
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <string.h>
+-int main() {
+- double x = 9006104071832581.0;
+- if (memcmp(&x, "\x05\x04\x03\x02\x01\xff\x3f\x43", 8) == 0)
+- return 0;
+- else
+- return 1;
+-}
+-]])],
+-[ac_cv_little_endian_double=yes],
+-[ac_cv_little_endian_double=no],
+-[ac_cv_little_endian_double=no])])
+-AC_MSG_RESULT($ac_cv_little_endian_double)
+-if test "$ac_cv_little_endian_double" = yes
+-then
+- AC_DEFINE(DOUBLE_IS_LITTLE_ENDIAN_IEEE754, 1,
+- [Define if C doubles are 64-bit IEEE 754 binary format, stored
+- with the least significant byte first])
+-fi
+-
+-AC_MSG_CHECKING(whether C doubles are big-endian IEEE 754 binary64)
+-AC_CACHE_VAL(ac_cv_big_endian_double, [
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <string.h>
+-int main() {
+- double x = 9006104071832581.0;
+- if (memcmp(&x, "\x43\x3f\xff\x01\x02\x03\x04\x05", 8) == 0)
+- return 0;
+- else
+- return 1;
+-}
+-]])],
+-[ac_cv_big_endian_double=yes],
+-[ac_cv_big_endian_double=no],
+-[ac_cv_big_endian_double=no])])
+-AC_MSG_RESULT($ac_cv_big_endian_double)
+-if test "$ac_cv_big_endian_double" = yes
+-then
+- AC_DEFINE(DOUBLE_IS_BIG_ENDIAN_IEEE754, 1,
+- [Define if C doubles are 64-bit IEEE 754 binary format, stored
+- with the most significant byte first])
+-fi
+-
+-# Some ARM platforms use a mixed-endian representation for doubles.
+-# While Python doesn't currently have full support for these platforms
+-# (see e.g., issue 1762561), we can at least make sure that float <-> string
+-# conversions work.
+-AC_MSG_CHECKING(whether C doubles are ARM mixed-endian IEEE 754 binary64)
+-AC_CACHE_VAL(ac_cv_mixed_endian_double, [
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <string.h>
+-int main() {
+- double x = 9006104071832581.0;
+- if (memcmp(&x, "\x01\xff\x3f\x43\x05\x04\x03\x02", 8) == 0)
+- return 0;
+- else
+- return 1;
+-}
+-]])],
+-[ac_cv_mixed_endian_double=yes],
+-[ac_cv_mixed_endian_double=no],
+-[ac_cv_mixed_endian_double=no])])
+-AC_MSG_RESULT($ac_cv_mixed_endian_double)
+-if test "$ac_cv_mixed_endian_double" = yes
+-then
+- AC_DEFINE(DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754, 1,
+- [Define if C doubles are 64-bit IEEE 754 binary format, stored
+- in ARM mixed-endian order (byte order 45670123)])
+-fi
+-
+-# The short float repr introduced in Python 3.1 requires the
+-# correctly-rounded string <-> double conversion functions from
+-# Python/dtoa.c, which in turn require that the FPU uses 53-bit
+-# rounding; this is a problem on x86, where the x87 FPU has a default
+-# rounding precision of 64 bits. For gcc/x86, we can fix this by
+-# using inline assembler to get and set the x87 FPU control word.
+-
+-# This inline assembler syntax may also work for suncc and icc,
+-# so we try it on all platforms.
+-
+-AC_MSG_CHECKING(whether we can use gcc inline assembler to get and set x87 control word)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], [[
+- unsigned short cw;
+- __asm__ __volatile__ ("fnstcw %0" : "=m" (cw));
+- __asm__ __volatile__ ("fldcw %0" : : "m" (cw));
+-]])],[have_gcc_asm_for_x87=yes],[have_gcc_asm_for_x87=no])
+-AC_MSG_RESULT($have_gcc_asm_for_x87)
+-if test "$have_gcc_asm_for_x87" = yes
+-then
+- AC_DEFINE(HAVE_GCC_ASM_FOR_X87, 1,
+- [Define if we can use gcc inline assembler to get and set x87 control word])
+-fi
+-
+-# Detect whether system arithmetic is subject to x87-style double
+-# rounding issues. The result of this test has little meaning on non
+-# IEEE 754 platforms. On IEEE 754, test should return 1 if rounding
+-# mode is round-to-nearest and double rounding issues are present, and
+-# 0 otherwise. See http://bugs.python.org/issue2937 for more info.
+-AC_MSG_CHECKING(for x87-style double rounding)
+-# $BASECFLAGS may affect the result
+-ac_save_cc="$CC"
+-CC="$CC $BASECFLAGS"
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <stdlib.h>
+-#include <math.h>
+-int main() {
+- volatile double x, y, z;
+- /* 1./(1-2**-53) -> 1+2**-52 (correct), 1.0 (double rounding) */
+- x = 0.99999999999999989; /* 1-2**-53 */
+- y = 1./x;
+- if (y != 1.)
+- exit(0);
+- /* 1e16+2.99999 -> 1e16+2. (correct), 1e16+4. (double rounding) */
+- x = 1e16;
+- y = 2.99999;
+- z = x + y;
+- if (z != 1e16+4.)
+- exit(0);
+- /* both tests show evidence of double rounding */
+- exit(1);
+-}
+-]])],
+-[ac_cv_x87_double_rounding=no],
+-[ac_cv_x87_double_rounding=yes],
+-[ac_cv_x87_double_rounding=no])
+-CC="$ac_save_cc"
+-AC_MSG_RESULT($ac_cv_x87_double_rounding)
+-if test "$ac_cv_x87_double_rounding" = yes
+-then
+- AC_DEFINE(X87_DOUBLE_ROUNDING, 1,
+- [Define if arithmetic is subject to x87-style double rounding issue])
+-fi
+-
+-# ************************************
+-# * Check for mathematical functions *
+-# ************************************
+-
+-LIBS_SAVE=$LIBS
+-LIBS="$LIBS $LIBM"
+-
+-# On FreeBSD 6.2, it appears that tanh(-0.) returns 0. instead of
+-# -0. on some architectures.
+-AC_MSG_CHECKING(whether tanh preserves the sign of zero)
+-AC_CACHE_VAL(ac_cv_tanh_preserves_zero_sign, [
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <math.h>
+-#include <stdlib.h>
+-int main() {
+- /* return 0 if either negative zeros don't exist
+- on this platform or if negative zeros exist
+- and tanh(-0.) == -0. */
+- if (atan2(0., -1.) == atan2(-0., -1.) ||
+- atan2(tanh(-0.), -1.) == atan2(-0., -1.)) exit(0);
+- else exit(1);
+-}
+-]])],
+-[ac_cv_tanh_preserves_zero_sign=yes],
+-[ac_cv_tanh_preserves_zero_sign=no],
+-[ac_cv_tanh_preserves_zero_sign=no])])
+-AC_MSG_RESULT($ac_cv_tanh_preserves_zero_sign)
+-if test "$ac_cv_tanh_preserves_zero_sign" = yes
+-then
+- AC_DEFINE(TANH_PRESERVES_ZERO_SIGN, 1,
+- [Define if tanh(-0.) is -0., or if platform doesn't have signed zeros])
+-fi
+-
+-AC_CHECK_FUNCS([acosh asinh atanh copysign erf erfc expm1 finite gamma])
+-AC_CHECK_FUNCS([hypot lgamma log1p round tgamma])
+-AC_CHECK_DECLS([isinf, isnan, isfinite], [], [], [[#include <math.h>]])
+-
+-LIBS=$LIBS_SAVE
+-
+-# For multiprocessing module, check that sem_open
+-# actually works. For FreeBSD versions <= 7.2,
+-# the kernel module that provides POSIX semaphores
+-# isn't loaded by default, so an attempt to call
+-# sem_open results in a 'Signal 12' error.
+-AC_MSG_CHECKING(whether POSIX semaphores are enabled)
+-AC_CACHE_VAL(ac_cv_posix_semaphores_enabled,
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <unistd.h>
+-#include <fcntl.h>
+-#include <stdio.h>
+-#include <semaphore.h>
+-#include <sys/stat.h>
+-
+-int main(void) {
+- sem_t *a = sem_open("/autoconf", O_CREAT, S_IRUSR|S_IWUSR, 0);
+- if (a == SEM_FAILED) {
+- perror("sem_open");
+- return 1;
+- }
+- sem_close(a);
+- sem_unlink("/autoconf");
+- return 0;
+-}
+-]])],
+-[ac_cv_posix_semaphores_enabled=yes],
+-[ac_cv_posix_semaphores_enabled=no],
+-[ac_cv_posix_semaphores_enabled=yes])
+-)
+-AC_MSG_RESULT($ac_cv_posix_semaphores_enabled)
+-if test $ac_cv_posix_semaphores_enabled = no
+-then
+- AC_DEFINE(POSIX_SEMAPHORES_NOT_ENABLED, 1,
+- [Define if POSIX semaphores aren't enabled on your system])
+-fi
+-
+-# Multiprocessing check for broken sem_getvalue
+-AC_MSG_CHECKING(for broken sem_getvalue)
+-AC_CACHE_VAL(ac_cv_broken_sem_getvalue,
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <unistd.h>
+-#include <fcntl.h>
+-#include <stdio.h>
+-#include <semaphore.h>
+-#include <sys/stat.h>
+-
+-int main(void){
+- sem_t *a = sem_open("/autocftw", O_CREAT, S_IRUSR|S_IWUSR, 0);
+- int count;
+- int res;
+- if(a==SEM_FAILED){
+- perror("sem_open");
+- return 1;
+-
+- }
+- res = sem_getvalue(a, &count);
+- sem_close(a);
+- sem_unlink("/autocftw");
+- return res==-1 ? 1 : 0;
+-}
+-]])],
+-[ac_cv_broken_sem_getvalue=no],
+-[ac_cv_broken_sem_getvalue=yes],
+-[ac_cv_broken_sem_getvalue=yes])
+-)
+-AC_MSG_RESULT($ac_cv_broken_sem_getvalue)
+-if test $ac_cv_broken_sem_getvalue = yes
+-then
+- AC_DEFINE(HAVE_BROKEN_SEM_GETVALUE, 1,
+- [define to 1 if your sem_getvalue is broken.])
+-fi
+-
+-# determine what size digit to use for Python's longs
+-AC_MSG_CHECKING([digit size for Python's longs])
+-AC_ARG_ENABLE(big-digits,
+-AS_HELP_STRING([--enable-big-digits@<:@=BITS@:>@],[use big digits for Python longs [[BITS=30]]]),
+-[case $enable_big_digits in
+-yes)
+- enable_big_digits=30 ;;
+-no)
+- enable_big_digits=15 ;;
+-[15|30])
+- ;;
+-*)
+- AC_MSG_ERROR([bad value $enable_big_digits for --enable-big-digits; value should be 15 or 30]) ;;
+-esac
+-AC_MSG_RESULT($enable_big_digits)
+-AC_DEFINE_UNQUOTED(PYLONG_BITS_IN_DIGIT, $enable_big_digits, [Define as the preferred size in bits of long digits])
+-],
+-[AC_MSG_RESULT(no value specified)])
+-
+-# check for wchar.h
+-AC_CHECK_HEADER(wchar.h, [
+- AC_DEFINE(HAVE_WCHAR_H, 1,
+- [Define if the compiler provides a wchar.h header file.])
+- wchar_h="yes"
+-],
+-wchar_h="no"
+-)
+-
+-# determine wchar_t size
+-if test "$wchar_h" = yes
+-then
+- AC_CHECK_SIZEOF(wchar_t, 4, [#include <wchar.h>])
+-fi
+-
+-AC_MSG_CHECKING(for UCS-4 tcl)
+-have_ucs4_tcl=no
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+-#include <tcl.h>
+-#if TCL_UTF_MAX != 6
+-# error "NOT UCS4_TCL"
+-#endif]], [[]])],[
+- AC_DEFINE(HAVE_UCS4_TCL, 1, [Define this if you have tcl and TCL_UTF_MAX==6])
+- have_ucs4_tcl=yes
+-],[])
+-AC_MSG_RESULT($have_ucs4_tcl)
+-
+-# check whether wchar_t is signed or not
+-if test "$wchar_h" = yes
+-then
+- # check whether wchar_t is signed or not
+- AC_MSG_CHECKING(whether wchar_t is signed)
+- AC_CACHE_VAL(ac_cv_wchar_t_signed, [
+- AC_RUN_IFELSE([AC_LANG_SOURCE([[
+- #include <wchar.h>
+- int main()
+- {
+- /* Success: exit code 0 */
+- exit((((wchar_t) -1) < ((wchar_t) 0)) ? 0 : 1);
+- }
+- ]])],
+- [ac_cv_wchar_t_signed=yes],
+- [ac_cv_wchar_t_signed=no],
+- [ac_cv_wchar_t_signed=yes])])
+- AC_MSG_RESULT($ac_cv_wchar_t_signed)
+-fi
+-
+-AC_MSG_CHECKING(what type to use for unicode)
+-dnl quadrigraphs "@<:@" and "@:>@" produce "[" and "]" in the output
+-AC_ARG_ENABLE(unicode,
+- AS_HELP_STRING([--enable-unicode@<:@=ucs@<:@24@:>@@:>@], [Enable Unicode strings (default is ucs2)]),
+- [],
+- [enable_unicode=yes])
+-
+-if test $enable_unicode = yes
+-then
+- # Without any arguments, Py_UNICODE defaults to two-byte mode
+- case "$have_ucs4_tcl" in
+- yes) enable_unicode="ucs4"
+- ;;
+- *) enable_unicode="ucs2"
+- ;;
+- esac
+-fi
+-
+-AH_TEMPLATE(Py_UNICODE_SIZE,
+- [Define as the size of the unicode type.])
+-case "$enable_unicode" in
+-ucs2) unicode_size="2"
+- AC_DEFINE(Py_UNICODE_SIZE,2)
+- ;;
+-ucs4) unicode_size="4"
+- AC_DEFINE(Py_UNICODE_SIZE,4)
+- ;;
+-*) AC_MSG_ERROR([invalid value for --enable-unicode. Use either ucs2 or ucs4 (lowercase).]) ;;
+-esac
+-
+-AH_TEMPLATE(PY_UNICODE_TYPE,
+- [Define as the integral type used for Unicode representation.])
+-
+-AC_SUBST(UNICODE_OBJS)
+-if test "$enable_unicode" = "no"
+-then
+- UNICODE_OBJS=""
+- AC_MSG_RESULT(not used)
+-else
+- UNICODE_OBJS="Objects/unicodeobject.o Objects/unicodectype.o"
+- AC_DEFINE(Py_USING_UNICODE, 1,
+- [Define if you want to have a Unicode type.])
+-
+- # wchar_t is only usable if it maps to an unsigned type
+- if test "$unicode_size" = "$ac_cv_sizeof_wchar_t" \
+- -a "$ac_cv_wchar_t_signed" = "no"
+- then
+- PY_UNICODE_TYPE="wchar_t"
+- AC_DEFINE(HAVE_USABLE_WCHAR_T, 1,
+- [Define if you have a useable wchar_t type defined in wchar.h; useable
+- means wchar_t must be an unsigned type with at least 16 bits. (see
+- Include/unicodeobject.h).])
+- AC_DEFINE(PY_UNICODE_TYPE,wchar_t)
+- elif test "$ac_cv_sizeof_short" = "$unicode_size"
+- then
+- PY_UNICODE_TYPE="unsigned short"
+- AC_DEFINE(PY_UNICODE_TYPE,unsigned short)
+- elif test "$ac_cv_sizeof_long" = "$unicode_size"
+- then
+- PY_UNICODE_TYPE="unsigned long"
+- AC_DEFINE(PY_UNICODE_TYPE,unsigned long)
+- else
+- PY_UNICODE_TYPE="no type found"
+- fi
+- AC_MSG_RESULT($PY_UNICODE_TYPE)
+-fi
+-
+-# check for endianness
+-AC_C_BIGENDIAN
+-
+-# Check whether right shifting a negative integer extends the sign bit
+-# or fills with zeros (like the Cray J90, according to Tim Peters).
+-AC_MSG_CHECKING(whether right shift extends the sign bit)
+-AC_CACHE_VAL(ac_cv_rshift_extends_sign, [
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-int main()
+-{
+- exit(((-1)>>3 == -1) ? 0 : 1);
+-}
+-]])],
+-[ac_cv_rshift_extends_sign=yes],
+-[ac_cv_rshift_extends_sign=no],
+-[ac_cv_rshift_extends_sign=yes])])
+-AC_MSG_RESULT($ac_cv_rshift_extends_sign)
+-if test "$ac_cv_rshift_extends_sign" = no
+-then
+- AC_DEFINE(SIGNED_RIGHT_SHIFT_ZERO_FILLS, 1,
+- [Define if i>>j for signed int i does not extend the sign bit
+- when i < 0])
+-fi
+-
+-# check for getc_unlocked and related locking functions
+-AC_MSG_CHECKING(for getc_unlocked() and friends)
+-AC_CACHE_VAL(ac_cv_have_getc_unlocked, [
+-AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <stdio.h>]], [[
+- FILE *f = fopen("/dev/null", "r");
+- flockfile(f);
+- getc_unlocked(f);
+- funlockfile(f);
+-]])],[ac_cv_have_getc_unlocked=yes],[ac_cv_have_getc_unlocked=no])])
+-AC_MSG_RESULT($ac_cv_have_getc_unlocked)
+-if test "$ac_cv_have_getc_unlocked" = yes
+-then
+- AC_DEFINE(HAVE_GETC_UNLOCKED, 1,
+- [Define this if you have flockfile(), getc_unlocked(), and funlockfile()])
+-fi
+-
+-# check where readline lives
+-# save the value of LIBS so we don't actually link Python with readline
+-LIBS_no_readline=$LIBS
+-
+-# On some systems we need to link readline to a termcap compatible
+-# library. NOTE: Keep the precedence of listed libraries synchronised
+-# with setup.py.
+-py_cv_lib_readline=no
+-AC_MSG_CHECKING([how to link readline libs])
+-for py_libtermcap in "" ncursesw ncurses curses termcap; do
+- if test -z "$py_libtermcap"; then
+- READLINE_LIBS="-lreadline"
+- else
+- READLINE_LIBS="-lreadline -l$py_libtermcap"
+- fi
+- LIBS="$READLINE_LIBS $LIBS_no_readline"
+- AC_LINK_IFELSE(
+- [AC_LANG_CALL([],[readline])],
+- [py_cv_lib_readline=yes])
+- if test $py_cv_lib_readline = yes; then
+- break
+- fi
+-done
+-# Uncomment this line if you want to use READLINE_LIBS in Makefile or scripts
+-#AC_SUBST([READLINE_LIBS])
+-if test $py_cv_lib_readline = no; then
+- AC_MSG_RESULT([none])
+-else
+- AC_MSG_RESULT([$READLINE_LIBS])
+- AC_DEFINE(HAVE_LIBREADLINE, 1,
+- [Define if you have the readline library (-lreadline).])
+-fi
+-
+-# check for readline 2.1
+-AC_CHECK_LIB(readline, rl_callback_handler_install,
+- AC_DEFINE(HAVE_RL_CALLBACK, 1,
+- [Define if you have readline 2.1]), ,$READLINE_LIBS)
+-
+-# check for readline 2.2
+-AC_PREPROC_IFELSE([AC_LANG_SOURCE([[#include <readline/readline.h>]])],
+- [have_readline=yes],
+- [have_readline=no]
+-)
+-if test $have_readline = yes
+-then
+- AC_EGREP_HEADER([extern int rl_completion_append_character;],
+- [readline/readline.h],
+- AC_DEFINE(HAVE_RL_COMPLETION_APPEND_CHARACTER, 1,
+- [Define if you have readline 2.2]), )
+- AC_EGREP_HEADER([extern int rl_completion_suppress_append;],
+- [readline/readline.h],
+- AC_DEFINE(HAVE_RL_COMPLETION_SUPPRESS_APPEND, 1,
+- [Define if you have rl_completion_suppress_append]), )
+-fi
+-
+-# check for readline 4.0
+-AC_CHECK_LIB(readline, rl_pre_input_hook,
+- AC_DEFINE(HAVE_RL_PRE_INPUT_HOOK, 1,
+- [Define if you have readline 4.0]), ,$READLINE_LIBS)
+-
+-# also in 4.0
+-AC_CHECK_LIB(readline, rl_completion_display_matches_hook,
+- AC_DEFINE(HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK, 1,
+- [Define if you have readline 4.0]), ,$READLINE_LIBS)
+-
+-# check for readline 4.2
+-AC_CHECK_LIB(readline, rl_completion_matches,
+- AC_DEFINE(HAVE_RL_COMPLETION_MATCHES, 1,
+- [Define if you have readline 4.2]), ,$READLINE_LIBS)
+-
+-# also in readline 4.2
+-AC_PREPROC_IFELSE([AC_LANG_SOURCE([[#include <readline/readline.h>]])],
+- [have_readline=yes],
+- [have_readline=no]
+-)
+-if test $have_readline = yes
+-then
+- AC_EGREP_HEADER([extern int rl_catch_signals;],
+- [readline/readline.h],
+- AC_DEFINE(HAVE_RL_CATCH_SIGNAL, 1,
+- [Define if you can turn off readline's signal handling.]), )
+-fi
+-
+-# End of readline checks: restore LIBS
+-LIBS=$LIBS_no_readline
+-
+-AC_MSG_CHECKING(for broken nice())
+-AC_CACHE_VAL(ac_cv_broken_nice, [
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-int main()
+-{
+- int val1 = nice(1);
+- if (val1 != -1 && val1 == nice(2))
+- exit(0);
+- exit(1);
+-}
+-]])],
+-[ac_cv_broken_nice=yes],
+-[ac_cv_broken_nice=no],
+-[ac_cv_broken_nice=no])])
+-AC_MSG_RESULT($ac_cv_broken_nice)
+-if test "$ac_cv_broken_nice" = yes
+-then
+- AC_DEFINE(HAVE_BROKEN_NICE, 1,
+- [Define if nice() returns success/failure instead of the new priority.])
+-fi
+-
+-AC_MSG_CHECKING(for broken poll())
+-AC_CACHE_VAL(ac_cv_broken_poll,
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <poll.h>
+-
+-int main()
+-{
+- struct pollfd poll_struct = { 42, POLLIN|POLLPRI|POLLOUT, 0 };
+- int poll_test;
+-
+- close (42);
+-
+- poll_test = poll(&poll_struct, 1, 0);
+- if (poll_test < 0)
+- return 0;
+- else if (poll_test == 0 && poll_struct.revents != POLLNVAL)
+- return 0;
+- else
+- return 1;
+-}
+-]])],
+-[ac_cv_broken_poll=yes],
+-[ac_cv_broken_poll=no],
+-[ac_cv_broken_poll=no]))
+-AC_MSG_RESULT($ac_cv_broken_poll)
+-if test "$ac_cv_broken_poll" = yes
+-then
+- AC_DEFINE(HAVE_BROKEN_POLL, 1,
+- [Define if poll() sets errno on invalid file descriptors.])
+-fi
+-
+-# Before we can test tzset, we need to check if struct tm has a tm_zone
+-# (which is not required by ISO C or UNIX spec) and/or if we support
+-# tzname[]
+-AC_STRUCT_TIMEZONE
+-
+-# check tzset(3) exists and works like we expect it to
+-AC_MSG_CHECKING(for working tzset())
+-AC_CACHE_VAL(ac_cv_working_tzset, [
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <stdlib.h>
+-#include <time.h>
+-#include <string.h>
+-
+-#if HAVE_TZNAME
+-extern char *tzname[];
+-#endif
+-
+-int main()
+-{
+- /* Note that we need to ensure that not only does tzset(3)
+- do 'something' with localtime, but it works as documented
+- in the library reference and as expected by the test suite.
+- This includes making sure that tzname is set properly if
+- tm->tm_zone does not exist since it is the alternative way
+- of getting timezone info.
+-
+- Red Hat 6.2 doesn't understand the southern hemisphere
+- after New Year's Day.
+- */
+-
+- time_t groundhogday = 1044144000; /* GMT-based */
+- time_t midyear = groundhogday + (365 * 24 * 3600 / 2);
+-
+- putenv("TZ=UTC+0");
+- tzset();
+- if (localtime(&groundhogday)->tm_hour != 0)
+- exit(1);
+-#if HAVE_TZNAME
+- /* For UTC, tzname[1] is sometimes "", sometimes " " */
+- if (strcmp(tzname[0], "UTC") ||
+- (tzname[1][0] != 0 && tzname[1][0] != ' '))
+- exit(1);
+-#endif
+-
+- putenv("TZ=EST+5EDT,M4.1.0,M10.5.0");
+- tzset();
+- if (localtime(&groundhogday)->tm_hour != 19)
+- exit(1);
+-#if HAVE_TZNAME
+- if (strcmp(tzname[0], "EST") || strcmp(tzname[1], "EDT"))
+- exit(1);
+-#endif
+-
+- putenv("TZ=AEST-10AEDT-11,M10.5.0,M3.5.0");
+- tzset();
+- if (localtime(&groundhogday)->tm_hour != 11)
+- exit(1);
+-#if HAVE_TZNAME
+- if (strcmp(tzname[0], "AEST") || strcmp(tzname[1], "AEDT"))
+- exit(1);
+-#endif
+-
+-#if HAVE_STRUCT_TM_TM_ZONE
+- if (strcmp(localtime(&groundhogday)->tm_zone, "AEDT"))
+- exit(1);
+- if (strcmp(localtime(&midyear)->tm_zone, "AEST"))
+- exit(1);
+-#endif
+-
+- exit(0);
+-}
+-]])],
+-[ac_cv_working_tzset=yes],
+-[ac_cv_working_tzset=no],
+-[ac_cv_working_tzset=no])])
+-AC_MSG_RESULT($ac_cv_working_tzset)
+-if test "$ac_cv_working_tzset" = yes
+-then
+- AC_DEFINE(HAVE_WORKING_TZSET, 1,
+- [Define if tzset() actually switches the local timezone in a meaningful way.])
+-fi
+-
+-# Look for subsecond timestamps in struct stat
+-AC_MSG_CHECKING(for tv_nsec in struct stat)
+-AC_CACHE_VAL(ac_cv_stat_tv_nsec,
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/stat.h>]], [[
+-struct stat st;
+-st.st_mtim.tv_nsec = 1;
+-]])],
+-[ac_cv_stat_tv_nsec=yes],
+-[ac_cv_stat_tv_nsec=no]))
+-AC_MSG_RESULT($ac_cv_stat_tv_nsec)
+-if test "$ac_cv_stat_tv_nsec" = yes
+-then
+- AC_DEFINE(HAVE_STAT_TV_NSEC, 1,
+- [Define if you have struct stat.st_mtim.tv_nsec])
+-fi
+-
+-# Look for BSD style subsecond timestamps in struct stat
+-AC_MSG_CHECKING(for tv_nsec2 in struct stat)
+-AC_CACHE_VAL(ac_cv_stat_tv_nsec2,
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/stat.h>]], [[
+-struct stat st;
+-st.st_mtimespec.tv_nsec = 1;
+-]])],
+-[ac_cv_stat_tv_nsec2=yes],
+-[ac_cv_stat_tv_nsec2=no]))
+-AC_MSG_RESULT($ac_cv_stat_tv_nsec2)
+-if test "$ac_cv_stat_tv_nsec2" = yes
+-then
+- AC_DEFINE(HAVE_STAT_TV_NSEC2, 1,
+- [Define if you have struct stat.st_mtimensec])
+-fi
+-
+-# On HP/UX 11.0, mvwdelch is a block with a return statement
+-AC_MSG_CHECKING(whether mvwdelch is an expression)
+-AC_CACHE_VAL(ac_cv_mvwdelch_is_expression,
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <curses.h>]], [[
+- int rtn;
+- rtn = mvwdelch(0,0,0);
+-]])],
+-[ac_cv_mvwdelch_is_expression=yes],
+-[ac_cv_mvwdelch_is_expression=no]))
+-AC_MSG_RESULT($ac_cv_mvwdelch_is_expression)
+-
+-if test "$ac_cv_mvwdelch_is_expression" = yes
+-then
+- AC_DEFINE(MVWDELCH_IS_EXPRESSION, 1,
+- [Define if mvwdelch in curses.h is an expression.])
+-fi
+-
+-AC_MSG_CHECKING(whether WINDOW has _flags)
+-AC_CACHE_VAL(ac_cv_window_has_flags,
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <curses.h>]], [[
+- WINDOW *w;
+- w->_flags = 0;
+-]])],
+-[ac_cv_window_has_flags=yes],
+-[ac_cv_window_has_flags=no]))
+-AC_MSG_RESULT($ac_cv_window_has_flags)
+-
+-
+-if test "$ac_cv_window_has_flags" = yes
+-then
+- AC_DEFINE(WINDOW_HAS_FLAGS, 1,
+- [Define if WINDOW in curses.h offers a field _flags.])
+-fi
+-
+-AC_MSG_CHECKING(for is_term_resized)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <curses.h>]], [[void *x=is_term_resized]])],
+- [AC_DEFINE(HAVE_CURSES_IS_TERM_RESIZED, 1, Define if you have the 'is_term_resized' function.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)]
+-)
+-
+-AC_MSG_CHECKING(for resize_term)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <curses.h>]], [[void *x=resize_term]])],
+- [AC_DEFINE(HAVE_CURSES_RESIZE_TERM, 1, Define if you have the 'resize_term' function.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)]
+-)
+-
+-AC_MSG_CHECKING(for resizeterm)
+-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <curses.h>]], [[void *x=resizeterm]])],
+- [AC_DEFINE(HAVE_CURSES_RESIZETERM, 1, Define if you have the 'resizeterm' function.)
+- AC_MSG_RESULT(yes)],
+- [AC_MSG_RESULT(no)]
+-)
+-
+-AC_MSG_CHECKING(for /dev/ptmx)
+-
+-if test -r /dev/ptmx
+-then
+- AC_MSG_RESULT(yes)
+- AC_DEFINE(HAVE_DEV_PTMX, 1,
+- [Define if we have /dev/ptmx.])
+-else
+- AC_MSG_RESULT(no)
+-fi
+-
+-AC_MSG_CHECKING(for /dev/ptc)
+-
+-if test -r /dev/ptc
+-then
+- AC_MSG_RESULT(yes)
+- AC_DEFINE(HAVE_DEV_PTC, 1,
+- [Define if we have /dev/ptc.])
+-else
+- AC_MSG_RESULT(no)
+-fi
+-
+-if test "$have_long_long" = yes
+-then
+- AC_MSG_CHECKING(for %lld and %llu printf() format support)
+- AC_CACHE_VAL(ac_cv_have_long_long_format,
+- AC_RUN_IFELSE([AC_LANG_SOURCE([[[
+- #include <stdio.h>
+- #include <stddef.h>
+- #include <string.h>
+-
+- #ifdef HAVE_SYS_TYPES_H
+- #include <sys/types.h>
+- #endif
+-
+- int main()
+- {
+- char buffer[256];
+-
+- if (sprintf(buffer, "%lld", (long long)123) < 0)
+- return 1;
+- if (strcmp(buffer, "123"))
+- return 1;
+-
+- if (sprintf(buffer, "%lld", (long long)-123) < 0)
+- return 1;
+- if (strcmp(buffer, "-123"))
+- return 1;
+-
+- if (sprintf(buffer, "%llu", (unsigned long long)123) < 0)
+- return 1;
+- if (strcmp(buffer, "123"))
+- return 1;
+-
+- return 0;
+- }
+- ]]])],
+- [ac_cv_have_long_long_format=yes],
+- [ac_cv_have_long_long_format=no],
+- [ac_cv_have_long_long_format=no])
+- )
+- AC_MSG_RESULT($ac_cv_have_long_long_format)
+-fi
+-
+-if test "$ac_cv_have_long_long_format" = yes
+-then
+- AC_DEFINE(PY_FORMAT_LONG_LONG, "ll",
+- [Define to printf format modifier for long long type])
+-fi
+-
+-if test $ac_sys_system = Darwin
+-then
+- LIBS="$LIBS -framework CoreFoundation"
+-fi
+-
+-
+-AC_CACHE_CHECK([for %zd printf() format support], ac_cv_have_size_t_format, [dnl
+-AC_RUN_IFELSE([AC_LANG_SOURCE([[
+-#include <stdio.h>
+-#include <stddef.h>
+-#include <string.h>
+-
+-#ifdef HAVE_SYS_TYPES_H
+-#include <sys/types.h>
+-#endif
+-
+-#ifdef HAVE_SSIZE_T
+-typedef ssize_t Py_ssize_t;
+-#elif SIZEOF_VOID_P == SIZEOF_LONG
+-typedef long Py_ssize_t;
+-#else
+-typedef int Py_ssize_t;
+-#endif
+-
+-int main()
+-{
+- char buffer[256];
+-
+- if(sprintf(buffer, "%zd", (size_t)123) < 0)
+- return 1;
+-
+- if (strcmp(buffer, "123"))
+- return 1;
+-
+- if (sprintf(buffer, "%zd", (Py_ssize_t)-123) < 0)
+- return 1;
+-
+- if (strcmp(buffer, "-123"))
+- return 1;
+-
+- return 0;
+-}
+-]])],
+-[ac_cv_have_size_t_format=yes],
+-[ac_cv_have_size_t_format=no],
+-[ac_cv_have_size_t_format="cross -- assuming yes"
+-])])
+-if test "$ac_cv_have_size_t_format" != no ; then
+- AC_DEFINE(PY_FORMAT_SIZE_T, "z",
+- [Define to printf format modifier for Py_ssize_t])
+-fi
+-
+-AC_CHECK_TYPE(socklen_t,,
+- AC_DEFINE(socklen_t,int,
+- [Define to `int' if <sys/socket.h> does not define.]),[
+-#ifdef HAVE_SYS_TYPES_H
+-#include <sys/types.h>
+-#endif
+-#ifdef HAVE_SYS_SOCKET_H
+-#include <sys/socket.h>
+-#endif
+-])
+-
+-case $ac_sys_system in
+-AIX*)
+- AC_DEFINE(HAVE_BROKEN_PIPE_BUF, 1, [Define if the system reports an invalid PIPE_BUF value.]) ;;
+-esac
+-
+-
+-AC_SUBST(THREADHEADERS)
+-
+-for h in `(cd $srcdir;echo Python/thread_*.h)`
+-do
+- THREADHEADERS="$THREADHEADERS \$(srcdir)/$h"
+-done
+-
+-AC_SUBST(SRCDIRS)
+-SRCDIRS="Parser Grammar Objects Python Modules Mac"
+-AC_MSG_CHECKING(for build directories)
+-for dir in $SRCDIRS; do
+- if test ! -d $dir; then
+- mkdir $dir
+- fi
+-done
+-AC_MSG_RESULT(done)
+-
+-# generate output files
+-AC_CONFIG_FILES(Makefile.pre Modules/Setup.config Misc/python.pc)
+-AC_CONFIG_FILES([Modules/ld_so_aix], [chmod +x Modules/ld_so_aix])
+-AC_OUTPUT
+-
+-echo "creating Modules/Setup"
+-if test ! -f Modules/Setup
+-then
+- cp $srcdir/Modules/Setup.dist Modules/Setup
+-fi
+-
+-echo "creating Modules/Setup.local"
+-if test ! -f Modules/Setup.local
+-then
+- echo "# Edit this file for local setup changes" >Modules/Setup.local
+-fi
+-
+-echo "creating Makefile"
+-$SHELL $srcdir/Modules/makesetup -c $srcdir/Modules/config.c.in \
+- -s Modules Modules/Setup.config \
+- Modules/Setup.local Modules/Setup
+-
+-case $ac_sys_system in
+-BeOS)
+- AC_MSG_WARN([
+-
+- Support for BeOS is deprecated as of Python 2.6.
+- See PEP 11 for the gory details.
+- ])
+- ;;
+-*) ;;
+-esac
+-
+-mv config.c Modules
+diff -r 70274d53c1dd pyconfig.h.in
+--- a/pyconfig.h.in
++++ b/pyconfig.h.in
+@@ -1,4 +1,4 @@
+-/* pyconfig.h.in. Generated from configure.in by autoheader. */
++/* pyconfig.h.in. Generated from configure.ac by autoheader. */
+
+
+ #ifndef Py_PYCONFIG_H
+diff -r 70274d53c1dd setup.py
+--- a/setup.py
++++ b/setup.py
+@@ -48,6 +48,9 @@
+ Returns True if 'path' can be located in an OSX SDK
+ """
+ return (path.startswith('/usr/') and not path.startswith('/usr/local')) or path.startswith('/System/')
++ return ( (path.startswith('/usr/') and not path.startswith('/usr/local'))
++ or path.startswith('/System/')
++ or path.startswith('/Library/') )
+
+ def find_file(filename, std_dirs, paths):
+ """Searches for the directory where a given file is located,
+@@ -186,7 +189,7 @@
+
+ # Python header files
+ headers = [sysconfig.get_config_h_filename()]
+- headers += glob(os.path.join(sysconfig.get_path('platinclude'), "*.h"))
++ headers += glob(os.path.join(sysconfig.get_path('include'), "*.h"))
+ for ext in self.extensions[:]:
+ ext.sources = [ find_module_file(filename, moddirlist)
+ for filename in ext.sources ]
+@@ -451,6 +454,10 @@
+ if platform in ['osf1', 'unixware7', 'openunix8']:
+ lib_dirs += ['/usr/ccs/lib']
+
++ # HP-UX11iv3 keeps files in lib/hpux folders.
++ if platform == 'hp-ux11':
++ lib_dirs += ['/usr/lib/hpux64', '/usr/lib/hpux32']
++
+ if platform == 'darwin':
+ # This should work on any unixy platform ;-)
+ # If the user has bothered specifying additional -I and -L flags
+@@ -1021,12 +1028,12 @@
+ if sys.platform == 'darwin':
+ sysroot = macosx_sdk_root()
+
+- for d in inc_dirs + sqlite_inc_paths:
++ for d_ in inc_dirs + sqlite_inc_paths:
++ d = d_
++ if sys.platform == 'darwin' and is_macosx_sdk_path(d):
++ d = os.path.join(sysroot, d[1:])
++
+ f = os.path.join(d, "sqlite3.h")
+-
+- if sys.platform == 'darwin' and is_macosx_sdk_path(d):
+- f = os.path.join(sysroot, d[1:], "sqlite3.h")
+-
+ if os.path.exists(f):
+ if sqlite_setup_debug: print "sqlite: found %s"%f
+ incf = open(f).read()
+@@ -1154,10 +1161,14 @@
+ for cand in dbm_order:
+ if cand == "ndbm":
+ if find_file("ndbm.h", inc_dirs, []) is not None:
+- # Some systems have -lndbm, others don't
++ # Some systems have -lndbm, others have -lgdbm_compat,
++ # others don't have either
+ if self.compiler.find_library_file(lib_dirs,
+ 'ndbm'):
+ ndbm_libs = ['ndbm']
++ elif self.compiler.find_library_file(lib_dirs,
++ 'gdbm_compat'):
++ ndbm_libs = ['gdbm_compat']
+ else:
+ ndbm_libs = []
+ print "building dbm using ndbm"
diff --git a/examples/python2.7/patches/hotshot-import.diff b/examples/python2.7/patches/hotshot-import.diff
new file mode 100644
index 0000000..80d6407
--- /dev/null
+++ b/examples/python2.7/patches/hotshot-import.diff
@@ -0,0 +1,17 @@
+# DP: hotshot: Check for the availability of the profile and pstats modules.
+
+--- a/Lib/hotshot/stats.py
++++ b/Lib/hotshot/stats.py
+@@ -1,7 +1,10 @@
+ """Statistics analyzer for HotShot."""
+
+-import profile
+-import pstats
++try:
++ import profile
++ import pstats
++except ImportError, e:
++ raise ImportError, str(e) + '; please install the python-profiler package'
+
+ import hotshot.log
+
diff --git a/examples/python2.7/patches/hurd-broken-poll.diff b/examples/python2.7/patches/hurd-broken-poll.diff
new file mode 100644
index 0000000..825e3fb
--- /dev/null
+++ b/examples/python2.7/patches/hurd-broken-poll.diff
@@ -0,0 +1,23 @@
+# DP: Fix build failure on hurd, working around poll() on systems
+# DP: on which it returns an error on invalid FDs.
+
+--- a/Modules/selectmodule.c
++++ b/Modules/selectmodule.c
+@@ -1736,7 +1736,7 @@
+
+ static PyMethodDef select_methods[] = {
+ {"select", select_select, METH_VARARGS, select_doc},
+-#ifdef HAVE_POLL
++#if defined(HAVE_POLL) && !defined(HAVE_BROKEN_POLL)
+ {"poll", select_poll, METH_NOARGS, poll_doc},
+ #endif /* HAVE_POLL */
+ {0, 0}, /* sentinel */
+@@ -1768,7 +1768,7 @@
+ PyModule_AddIntConstant(m, "PIPE_BUF", PIPE_BUF);
+ #endif
+
+-#if defined(HAVE_POLL)
++#if defined(HAVE_POLL) && !defined(HAVE_BROKEN_POLL)
+ #ifdef __APPLE__
+ if (select_have_broken_poll()) {
+ if (PyObject_DelAttrString(m, "poll") == -1) {
diff --git a/examples/python2.7/patches/hurd-disable-nonworking-constants.diff b/examples/python2.7/patches/hurd-disable-nonworking-constants.diff
new file mode 100644
index 0000000..cfd5241
--- /dev/null
+++ b/examples/python2.7/patches/hurd-disable-nonworking-constants.diff
@@ -0,0 +1,34 @@
+# DP: Comment out constants exposed in the API which are not implemented on
+# DP: GNU/Hurd. They would not work at runtime anyway.
+
+--- a/Modules/posixmodule.c
++++ b/Modules/posixmodule.c
+@@ -9193,12 +9193,14 @@
+ #ifdef O_LARGEFILE
+ if (ins(d, "O_LARGEFILE", (long)O_LARGEFILE)) return -1;
+ #endif
++#ifndef __GNU__
+ #ifdef O_SHLOCK
+ if (ins(d, "O_SHLOCK", (long)O_SHLOCK)) return -1;
+ #endif
+ #ifdef O_EXLOCK
+ if (ins(d, "O_EXLOCK", (long)O_EXLOCK)) return -1;
+ #endif
++#endif
+
+ /* MS Windows */
+ #ifdef O_NOINHERIT
+--- a/Modules/socketmodule.c
++++ b/Modules/socketmodule.c
+@@ -4815,9 +4815,11 @@
+ #ifdef SO_OOBINLINE
+ PyModule_AddIntConstant(m, "SO_OOBINLINE", SO_OOBINLINE);
+ #endif
++#ifndef __GNU__
+ #ifdef SO_REUSEPORT
+ PyModule_AddIntConstant(m, "SO_REUSEPORT", SO_REUSEPORT);
+ #endif
++#endif
+ #ifdef SO_SNDBUF
+ PyModule_AddIntConstant(m, "SO_SNDBUF", SO_SNDBUF);
+ #endif
diff --git a/examples/python2.7/patches/issue15340.diff b/examples/python2.7/patches/issue15340.diff
new file mode 100644
index 0000000..31cc6ed
--- /dev/null
+++ b/examples/python2.7/patches/issue15340.diff
@@ -0,0 +1,16 @@
+# DP: - Issue #15340: Fix importing the random module when /dev/urandom cannot
+# DP: be opened. This was a regression caused by the hash randomization patch.
+
+diff -r 8cd6acffbcb9 -r edbf37ace03c Python/random.c
+--- a/Python/random.c Fri Sep 07 00:55:33 2012 +0200
++++ b/Python/random.c Fri Sep 07 23:49:07 2012 +0200
+@@ -165,7 +165,8 @@
+ Py_END_ALLOW_THREADS
+ if (fd < 0)
+ {
+- PyErr_SetFromErrnoWithFilename(PyExc_OSError, "/dev/urandom");
++ PyErr_SetString(PyExc_NotImplementedError,
++ "/dev/urandom (or equivalent) not found");
+ return -1;
+ }
+
diff --git a/examples/python2.7/patches/issue15847.diff b/examples/python2.7/patches/issue15847.diff
new file mode 100644
index 0000000..a4af8b8
--- /dev/null
+++ b/examples/python2.7/patches/issue15847.diff
@@ -0,0 +1,47 @@
+# DP: Fix issue #15847: allow args to be a tuple in parse_args.
+
+diff -r edbf37ace03c -r a2147bbf7868 Lib/argparse.py
+--- a/Lib/argparse.py Fri Sep 07 23:49:07 2012 +0200
++++ b/Lib/argparse.py Sat Sep 08 12:15:25 2012 -0400
+@@ -1692,9 +1692,12 @@
+ return args
+
+ def parse_known_args(self, args=None, namespace=None):
+- # args default to the system args
+ if args is None:
++ # args default to the system args
+ args = _sys.argv[1:]
++ else:
++ # make sure that args are mutable
++ args = list(args)
+
+ # default Namespace built from parser defaults
+ if namespace is None:
+diff -r edbf37ace03c -r a2147bbf7868 Lib/test/test_argparse.py
+--- a/Lib/test/test_argparse.py Fri Sep 07 23:49:07 2012 +0200
++++ b/Lib/test/test_argparse.py Sat Sep 08 12:15:25 2012 -0400
+@@ -4486,6 +4486,24 @@
+
+ class TestParseKnownArgs(TestCase):
+
++ def test_arguments_tuple(self):
++ parser = argparse.ArgumentParser()
++ parser.parse_args(())
++
++ def test_arguments_list(self):
++ parser = argparse.ArgumentParser()
++ parser.parse_args([])
++
++ def test_arguments_tuple_positional(self):
++ parser = argparse.ArgumentParser()
++ parser.add_argument('x')
++ parser.parse_args(('x',))
++
++ def test_arguments_list_positional(self):
++ parser = argparse.ArgumentParser()
++ parser.add_argument('x')
++ parser.parse_args(['x'])
++
+ def test_optionals(self):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--foo')
diff --git a/examples/python2.7/patches/issue9012a.diff b/examples/python2.7/patches/issue9012a.diff
new file mode 100644
index 0000000..04aa548
--- /dev/null
+++ b/examples/python2.7/patches/issue9012a.diff
@@ -0,0 +1,13 @@
+# DP: Link _math.o only once to the static library.
+
+--- a/Modules/Setup.dist
++++ b/Modules/Setup.dist
+@@ -169,7 +169,7 @@
+ # Modules that should always be present (non UNIX dependent):
+
+ #array arraymodule.c # array objects
+-#cmath cmathmodule.c _math.c # -lm # complex math library functions
++#cmath cmathmodule.c # -lm # complex math library functions
+ #math mathmodule.c _math.c # -lm # math library functions, e.g. sin()
+ #_struct _struct.c # binary structure packing/unpacking
+ #time timemodule.c # -lm # time operations and variables
diff --git a/examples/python2.7/patches/issue9189.diff b/examples/python2.7/patches/issue9189.diff
new file mode 100644
index 0000000..e553893
--- /dev/null
+++ b/examples/python2.7/patches/issue9189.diff
@@ -0,0 +1,342 @@
+Index: b/Lib/sysconfig.py
+===================================================================
+--- a/Lib/sysconfig.py
++++ b/Lib/sysconfig.py
+@@ -227,11 +227,19 @@
+ done[n] = v
+
+ # do variable interpolation here
+- while notdone:
+- for name in notdone.keys():
++ variables = list(notdone.keys())
++
++ # Variables with a 'PY_' prefix in the makefile. These need to
++ # be made available without that prefix through sysconfig.
++ # Special care is needed to ensure that variable expansion works, even
++ # if the expansion uses the name without a prefix.
++ renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
++
++ while len(variables) > 0:
++ for name in tuple(variables):
+ value = notdone[name]
+ m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
+- if m:
++ if m is not None:
+ n = m.group(1)
+ found = True
+ if n in done:
+@@ -242,23 +250,48 @@
+ elif n in os.environ:
+ # do it like make: fall back to environment
+ item = os.environ[n]
++
++ elif n in renamed_variables:
++ if name.startswith('PY_') and name[3:] in renamed_variables:
++ item = ""
++
++ elif 'PY_' + n in notdone:
++ found = False
++
++ else:
++ item = str(done['PY_' + n])
++
+ else:
+ done[n] = item = ""
++
+ if found:
+ after = value[m.end():]
+ value = value[:m.start()] + item + after
+ if "$" in after:
+ notdone[name] = value
+ else:
+- try: value = int(value)
++ try:
++ value = int(value)
+ except ValueError:
+ done[name] = value.strip()
+ else:
+ done[name] = value
+- del notdone[name]
++ variables.remove(name)
++
++ if name.startswith('PY_') \
++ and name[3:] in renamed_variables:
++
++ name = name[3:]
++ if name not in done:
++ done[name] = value
++
++
+ else:
+- # bogus variable reference; just drop it since we can't deal
+- del notdone[name]
++ # bogus variable reference (e.g. "prefix=$/opt/python");
++ # just drop it since we can't deal
++ done[name] = value
++ variables.remove(name)
++
+ # strip spurious spaces
+ for k, v in done.items():
+ if isinstance(v, str):
+Index: b/Makefile.pre.in
+===================================================================
+--- a/Makefile.pre.in
++++ b/Makefile.pre.in
+@@ -62,12 +62,18 @@
+ # Compiler options
+ OPT= @OPT@
+ BASECFLAGS= @BASECFLAGS@
+-CFLAGS= $(BASECFLAGS) @CFLAGS@ $(OPT) $(EXTRA_CFLAGS)
++CONFIGURE_CFLAGS= @CFLAGS@
++CONFIGURE_CPPFLAGS= @CPPFLAGS@
++CONFIGURE_LDFLAGS= @LDFLAGS@
++# Avoid assigning CFLAGS, LDFLAGS, etc. so users can use them on the
++# command line to append to these values without stomping the pre-set
++# values.
++PY_CFLAGS= $(BASECFLAGS) $(OPT) $(CONFIGURE_CFLAGS) $(CFLAGS) $(EXTRA_CFLAGS)
+ # Both CPPFLAGS and LDFLAGS need to contain the shell's value for setup.py to
+ # be able to build extension modules using the directories specified in the
+ # environment variables
+-CPPFLAGS= -I. -IInclude -I$(srcdir)/Include @CPPFLAGS@
+-LDFLAGS= @LDFLAGS@
++PY_CPPFLAGS= -I. -IInclude -I$(srcdir)/Include $(CONFIGURE_CPPFLAGS) $(CPPFLAGS)
++PY_LDFLAGS= $(CONFIGURE_LDFLAGS) $(LDFLAGS)
+ LDLAST= @LDLAST@
+ SGI_ABI= @SGI_ABI@
+ CCSHARED= @CCSHARED@
+@@ -76,7 +82,7 @@
+ # Extra C flags added for building the interpreter object files.
+ CFLAGSFORSHARED=@CFLAGSFORSHARED@
+ # C flags used for building the interpreter object files
+-PY_CFLAGS= $(CFLAGS) $(CPPFLAGS) $(CFLAGSFORSHARED) -DPy_BUILD_CORE
++PY_CORE_CFLAGS= $(PY_CFLAGS) $(PY_CPPFLAGS) $(CFLAGSFORSHARED) -DPy_BUILD_CORE
+
+
+ # Machine-dependent subdirectories
+@@ -397,7 +403,7 @@
+
+ # Build the interpreter
+ $(BUILDPYTHON): Modules/python.o $(LIBRARY) $(LDLIBRARY)
+- $(LINKCC) $(LDFLAGS) $(LINKFORSHARED) -o $@ \
++ $(LINKCC) $(PY_LDFLAGS) $(LINKFORSHARED) -o $@ \
+ Modules/python.o \
+ $(BLDLIBRARY) $(LIBS) $(MODLIBS) $(SYSLIBS) $(LDLAST)
+
+@@ -413,7 +419,7 @@
+ s*) quiet="-q";; \
+ *) quiet="";; \
+ esac; \
+- $(RUNSHARED) CC='$(CC)' LDSHARED='$(BLDSHARED)' OPT='$(OPT)' \
++ $(RUNSHARED) CC='$(CC)' LDSHARED='$(BLDSHARED)' LDFLAGS='$(PY_LDFLAGS)' OPT='$(OPT)' \
+ ./$(BUILDPYTHON) -E $(srcdir)/setup.py $$quiet build
+
+ # Build static library
+@@ -430,18 +436,18 @@
+
+ libpython$(VERSION).so: $(LIBRARY_OBJS)
+ if test $(INSTSONAME) != $(LDLIBRARY); then \
+- $(BLDSHARED) -Wl,-h$(INSTSONAME) -o $(INSTSONAME) $(LIBRARY_OBJS) $(MODLIBS) $(SHLIBS) $(LIBC) $(LIBM) $(LDLAST); \
++ $(BLDSHARED) $(PY_LDFLAGS) -Wl,-h$(INSTSONAME) -o $(INSTSONAME) $(LIBRARY_OBJS) $(MODLIBS) $(SHLIBS) $(LIBC) $(LIBM) $(LDLAST); \
+ $(LN) -f $(INSTSONAME) $@; \
+ else \
+- $(BLDSHARED) -o $@ $(LIBRARY_OBJS) $(MODLIBS) $(SHLIBS) $(LIBC) $(LIBM) $(LDLAST); \
++ $(BLDSHARED) $(PY_LDFLAGS) -o $@ $(LIBRARY_OBJS) $(MODLIBS) $(SHLIBS) $(LIBC) $(LIBM) $(LDLAST); \
+ fi
+
+ libpython$(VERSION).dylib: $(LIBRARY_OBJS)
+- $(CC) -dynamiclib -Wl,-single_module $(LDFLAGS) -undefined dynamic_lookup -Wl,-install_name,$(prefix)/lib/libpython$(VERSION).dylib -Wl,-compatibility_version,$(VERSION) -Wl,-current_version,$(VERSION) -o $@ $(LIBRARY_OBJS) $(SHLIBS) $(LIBC) $(LIBM) $(LDLAST); \
++ $(CC) -dynamiclib -Wl,-single_module $(PY_LDFLAGS) -undefined dynamic_lookup -Wl,-install_name,$(prefix)/lib/libpython$(VERSION).dylib -Wl,-compatibility_version,$(VERSION) -Wl,-current_version,$(VERSION) -o $@ $(LIBRARY_OBJS) $(SHLIBS) $(LIBC) $(LIBM) $(LDLAST); \
+
+
+ libpython$(VERSION).sl: $(LIBRARY_OBJS)
+- $(LDSHARED) -o $@ $(LIBRARY_OBJS) $(MODLIBS) $(SHLIBS) $(LIBC) $(LIBM) $(LDLAST)
++ $(LDSHARED) $(PY_LDFLAGS) -o $@ $(LIBRARY_OBJS) $(MODLIBS) $(SHLIBS) $(LIBC) $(LIBM) $(LDLAST)
+
+ # Copy up the gdb python hooks into a position where they can be automatically
+ # loaded by gdb during Lib/test/test_gdb.py
+@@ -480,7 +486,7 @@
+ # for a shared core library; otherwise, this rule is a noop.
+ $(DLLLIBRARY) libpython$(VERSION).dll.a: $(LIBRARY_OBJS)
+ if test -n "$(DLLLIBRARY)"; then \
+- $(LDSHARED) -Wl,--out-implib=$@ -o $(DLLLIBRARY) $^ \
++ $(LDSHARED) $(PY_LDFLAGS) -Wl,--out-implib=$@ -o $(DLLLIBRARY) $^ \
+ $(LIBS) $(MODLIBS) $(SYSLIBS) $(LDLAST); \
+ else true; \
+ fi
+@@ -524,7 +530,7 @@
+ $(SIGNAL_OBJS) \
+ $(MODOBJS) \
+ $(srcdir)/Modules/getbuildinfo.c
+- $(CC) -c $(PY_CFLAGS) \
++ $(CC) -c $(PY_CORE_CFLAGS) \
+ -DSVNVERSION="\"`LC_ALL=C $(SVNVERSION)`\"" \
+ -DHGVERSION="\"`LC_ALL=C $(HGVERSION)`\"" \
+ -DHGTAG="\"`LC_ALL=C $(HGTAG)`\"" \
+@@ -532,7 +538,7 @@
+ -o $@ $(srcdir)/Modules/getbuildinfo.c
+
+ Modules/getpath.o: $(srcdir)/Modules/getpath.c Makefile
+- $(CC) -c $(PY_CFLAGS) -DPYTHONPATH='"$(PYTHONPATH)"' \
++ $(CC) -c $(PY_CORE_CFLAGS) -DPYTHONPATH='"$(PYTHONPATH)"' \
+ -DPREFIX='"$(prefix)"' \
+ -DEXEC_PREFIX='"$(exec_prefix)"' \
+ -DVERSION='"$(VERSION)"' \
+@@ -540,7 +546,7 @@
+ -o $@ $(srcdir)/Modules/getpath.c
+
+ Modules/python.o: $(srcdir)/Modules/python.c
+- $(MAINCC) -c $(PY_CFLAGS) -o $@ $(srcdir)/Modules/python.c
++ $(MAINCC) -c $(PY_CORE_CFLAGS) -o $@ $(srcdir)/Modules/python.c
+
+
+ # Use a stamp file to prevent make -j invoking pgen twice
+@@ -551,7 +557,7 @@
+ -touch Parser/pgen.stamp
+
+ $(PGEN): $(PGENOBJS)
+- $(CC) $(OPT) $(LDFLAGS) $(PGENOBJS) $(LIBS) -o $(PGEN)
++ $(CC) $(OPT) $(PY_LDFLAGS) $(PGENOBJS) $(LIBS) -o $(PGEN)
+
+ Parser/grammar.o: $(srcdir)/Parser/grammar.c \
+ $(srcdir)/Include/token.h \
+@@ -571,10 +577,10 @@
+ Python/compile.o Python/symtable.o Python/ast.o: $(GRAMMAR_H) $(AST_H)
+
+ Python/getplatform.o: $(srcdir)/Python/getplatform.c
+- $(CC) -c $(PY_CFLAGS) -DPLATFORM='"$(MACHDEP)"' -o $@ $(srcdir)/Python/getplatform.c
++ $(CC) -c $(PY_CORE_CFLAGS) -DPLATFORM='"$(MACHDEP)"' -o $@ $(srcdir)/Python/getplatform.c
+
+ Python/importdl.o: $(srcdir)/Python/importdl.c
+- $(CC) -c $(PY_CFLAGS) -I$(DLINCLDIR) -o $@ $(srcdir)/Python/importdl.c
++ $(CC) -c $(PY_CORE_CFLAGS) -I$(DLINCLDIR) -o $@ $(srcdir)/Python/importdl.c
+
+ Objects/unicodectype.o: $(srcdir)/Objects/unicodectype.c \
+ $(srcdir)/Objects/unicodetype_db.h
+@@ -1157,7 +1163,7 @@
+
+ # Some make's put the object file in the current directory
+ .c.o:
+- $(CC) -c $(PY_CFLAGS) -o $@ $<
++ $(CC) -c $(PY_CORE_CFLAGS) -o $@ $<
+
+ # Run reindent on the library
+ reindent:
+Index: b/Modules/makesetup
+===================================================================
+--- a/Modules/makesetup
++++ b/Modules/makesetup
+@@ -219,7 +219,7 @@
+ case $doconfig in
+ no) cc="$cc \$(CCSHARED) \$(CFLAGS) \$(CPPFLAGS)";;
+ *)
+- cc="$cc \$(PY_CFLAGS)";;
++ cc="$cc \$(PY_CORE_CFLAGS)";;
+ esac
+ rule="$obj: $src; $cc $cpps -c $src -o $obj"
+ echo "$rule" >>$rulesf
+Index: b/configure.ac
+===================================================================
+--- a/configure.ac
++++ b/configure.ac
+@@ -507,14 +507,13 @@
+ (it is also a good idea to do 'make clean' before compiling)])
+ fi
+
+-# If the user set CFLAGS, use this instead of the automatically
+-# determined setting
+-preset_cflags="$CFLAGS"
+-AC_PROG_CC
+-if test ! -z "$preset_cflags"
+-then
+- CFLAGS=$preset_cflags
++# Don't let AC_PROG_CC set the default CFLAGS. It normally sets -g -O2
++# when the compiler supports them, but we don't always want -O2, and
++# we set -g later.
++if test -z "$CFLAGS"; then
++ CFLAGS=
+ fi
++AC_PROG_CC
+
+ AC_SUBST(CXX)
+ AC_SUBST(MAINCC)
+Index: b/Lib/distutils/sysconfig.py
+===================================================================
+--- a/Lib/distutils/sysconfig.py
++++ b/Lib/distutils/sysconfig.py
+@@ -322,11 +322,19 @@
+ done[n] = v
+
+ # do variable interpolation here
+- while notdone:
+- for name in notdone.keys():
++ variables = list(notdone.keys())
++
++ # Variables with a 'PY_' prefix in the makefile. These need to
++ # be made available without that prefix through sysconfig.
++ # Special care is needed to ensure that variable expansion works, even
++ # if the expansion uses the name without a prefix.
++ renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
++
++ while len(variables) > 0:
++ for name in tuple(variables):
+ value = notdone[name]
+ m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
+- if m:
++ if m is not None:
+ n = m.group(1)
+ found = True
+ if n in done:
+@@ -337,25 +345,47 @@
+ elif n in os.environ:
+ # do it like make: fall back to environment
+ item = os.environ[n]
++
++ elif n in renamed_variables:
++ if name.startswith('PY_') and name[3:] in renamed_variables:
++ item = ""
++
++ elif 'PY_' + n in notdone:
++ found = False
++
++ else:
++ item = str(done['PY_' + n])
++
+ else:
+ done[n] = item = ""
++
+ if found:
+ after = value[m.end():]
+ value = value[:m.start()] + item + after
+ if "$" in after:
+ notdone[name] = value
+ else:
+- try: value = int(value)
++ try:
++ value = int(value)
+ except ValueError:
+ done[name] = value.strip()
+ else:
+ done[name] = value
+- del notdone[name]
+- else:
+- # bogus variable reference; just drop it since we can't deal
+- del notdone[name]
++ variables.remove(name)
++
++ if name.startswith('PY_') \
++ and name[3:] in renamed_variables:
+
+- fp.close()
++ name = name[3:]
++ if name not in done:
++ done[name] = value
++
++
++ else:
++ # bogus variable reference (e.g. "prefix=$/opt/python");
++ # just drop it since we can't deal
++ done[name] = value
++ variables.remove(name)
+
+ # strip spurious spaces
+ for k, v in done.items():
diff --git a/examples/python2.7/patches/issue9374-followup.diff b/examples/python2.7/patches/issue9374-followup.diff
new file mode 100644
index 0000000..7ef601c
--- /dev/null
+++ b/examples/python2.7/patches/issue9374-followup.diff
@@ -0,0 +1,30 @@
+# DP: Follow-up for issue #9374. Restore the removed lists of the urlparse module.
+
+# HG changeset patch
+# User Georg Brandl <georg@python.org>
+# Date 1345825048 -7200
+# Node ID a434812109641df2c2af33afa030d8d06eef693c
+# Parent 787ed9b03ef9b8e26638b28ec55330a871e76495
+Closes #9374: add back now-unused module attributes; removing them is a backward compatibility issue, since they have a public-seeming name.
+
+diff --git a/Lib/urlparse.py b/Lib/urlparse.py
+--- a/Lib/urlparse.py
++++ b/Lib/urlparse.py
+@@ -44,6 +44,16 @@ uses_params = ['ftp', 'hdl', 'prospero',
+ 'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
+ 'mms', '', 'sftp']
+
++# These are not actually used anymore, but should stay for backwards
++# compatibility. (They are undocumented, but have a public-looking name.)
++non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
++ 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
++uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
++ 'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
++uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
++ 'nntp', 'wais', 'https', 'shttp', 'snews',
++ 'file', 'prospero', '']
++
+ # Characters valid in scheme names
+ scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+
diff --git a/examples/python2.7/patches/linecache.diff b/examples/python2.7/patches/linecache.diff
new file mode 100644
index 0000000..a7fdd5a
--- /dev/null
+++ b/examples/python2.7/patches/linecache.diff
@@ -0,0 +1,16 @@
+# DP: Proper handling of packages in linecache.py
+
+--- a/Lib/linecache.py
++++ b/Lib/linecache.py
+@@ -108,6 +108,11 @@
+ if os.path.isabs(filename):
+ return []
+
++ # Take care to handle packages
++ if basename == '__init__.py':
++ # filename refers to a package
++ basename = filename
++
+ for dirname in sys.path:
+ # When using imputil, sys.path may contain things other than
+ # strings; ignore them when it happens.
diff --git a/examples/python2.7/patches/link-system-expat.diff b/examples/python2.7/patches/link-system-expat.diff
new file mode 100644
index 0000000..841d8c6
--- /dev/null
+++ b/examples/python2.7/patches/link-system-expat.diff
@@ -0,0 +1,22 @@
+# DP: Link with the system expat
+
+--- a/Modules/Setup.dist
++++ b/Modules/Setup.dist
+@@ -180,7 +180,7 @@
+ #itertools itertoolsmodule.c # Functions creating iterators for efficient looping
+ #strop stropmodule.c # String manipulations
+ #_functools _functoolsmodule.c # Tools for working with functions and callable objects
+-#_elementtree -I$(srcdir)/Modules/expat -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI _elementtree.c # elementtree accelerator
++#_elementtree -DUSE_PYEXPAT_CAPI _elementtree.c # elementtree accelerator
+ #_pickle _pickle.c # pickle accelerator
+ #datetime datetimemodule.c # date/time type
+ #_bisect _bisectmodule.c # Bisection algorithms
+@@ -471,7 +471,7 @@
+ #
+ # More information on Expat can be found at www.libexpat.org.
+ #
+-#pyexpat expat/xmlparse.c expat/xmlrole.c expat/xmltok.c pyexpat.c -I$(srcdir)/Modules/expat -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI
++#pyexpat pyexpat.c -lexpat
+
+
+ # Hye-Shik Chang's CJKCodecs
diff --git a/examples/python2.7/patches/link-whole-archive.diff b/examples/python2.7/patches/link-whole-archive.diff
new file mode 100644
index 0000000..29d14d2
--- /dev/null
+++ b/examples/python2.7/patches/link-whole-archive.diff
@@ -0,0 +1,13 @@
+# DP: Link libpython with --whole-archive.
+
+--- a/Makefile.pre.in
++++ b/Makefile.pre.in
+@@ -406,7 +406,7 @@
+ $(BUILDPYTHON): Modules/python.o $(LIBRARY) $(LDLIBRARY)
+ $(LINKCC) $(PY_LDFLAGS) $(LINKFORSHARED) -o $@ \
+ Modules/python.o \
+- $(BLDLIBRARY) $(LIBS) $(MODLIBS) $(SYSLIBS) $(LDLAST)
++ -Wl,--whole-archive $(BLDLIBRARY) -Wl,--no-whole-archive $(LIBS) $(MODLIBS) $(SYSLIBS) $(LDLAST)
+
+ platform: $(BUILDPYTHON)
+ $(RUNSHARED) ./$(BUILDPYTHON) -E -c 'import sys ; from sysconfig import get_platform ; print get_platform()+"-"+sys.version[0:3]' >platform
diff --git a/examples/python2.7/patches/locale-module.diff b/examples/python2.7/patches/locale-module.diff
new file mode 100644
index 0000000..1c3f52f
--- /dev/null
+++ b/examples/python2.7/patches/locale-module.diff
@@ -0,0 +1,17 @@
+# DP: * Lib/locale.py:
+# DP: - Don't map 'utf8', 'utf-8' to 'utf', which is not a known encoding
+# DP: for glibc.
+
+--- a/Lib/locale.py
++++ b/Lib/locale.py
+@@ -1520,8 +1520,8 @@
+ 'uk_ua.iso88595': 'uk_UA.ISO8859-5',
+ 'uk_ua.koi8u': 'uk_UA.KOI8-U',
+ 'uk_ua.microsoftcp1251': 'uk_UA.CP1251',
+- 'univ': 'en_US.utf',
+- 'universal': 'en_US.utf',
++ 'univ': 'en_US.UTF-8',
++ 'universal': 'en_US.UTF-8',
+ 'universal.utf8@ucs4': 'en_US.UTF-8',
+ 'ur': 'ur_PK.CP1256',
+ 'ur_pk': 'ur_PK.CP1256',
diff --git a/examples/python2.7/patches/makeflags.diff b/examples/python2.7/patches/makeflags.diff
new file mode 100644
index 0000000..e85238e
--- /dev/null
+++ b/examples/python2.7/patches/makeflags.diff
@@ -0,0 +1,24 @@
+Index: b/Makefile.pre.in
+===================================================================
+--- a/Makefile.pre.in
++++ b/Makefile.pre.in
+@@ -406,11 +406,15 @@
+
+
+ # Build the shared modules
++# MAKEFLAGS are sorted and normalized. Under GNU make the 's' for
++# -s, --silent or --quiet is always the first char.
+ sharedmods: $(BUILDPYTHON)
+- @case $$MAKEFLAGS in \
+- *s*) $(RUNSHARED) CC='$(CC)' LDSHARED='$(BLDSHARED)' OPT='$(OPT)' ./$(BUILDPYTHON) -E $(srcdir)/setup.py -q build;; \
+- *) $(RUNSHARED) CC='$(CC)' LDSHARED='$(BLDSHARED)' OPT='$(OPT)' ./$(BUILDPYTHON) -E $(srcdir)/setup.py build;; \
+- esac
++ @case "$$MAKEFLAGS" in \
++ s*) quiet="-q";; \
++ *) quiet="";; \
++ esac; \
++ $(RUNSHARED) CC='$(CC)' LDSHARED='$(BLDSHARED)' OPT='$(OPT)' \
++ ./$(BUILDPYTHON) -E $(srcdir)/setup.py $$quiet build
+
+ # Build static library
+ # avoid long command lines, same as LIBRARY_OBJS
diff --git a/examples/python2.7/patches/makesetup-bashism.diff b/examples/python2.7/patches/makesetup-bashism.diff
new file mode 100644
index 0000000..51ee475
--- /dev/null
+++ b/examples/python2.7/patches/makesetup-bashism.diff
@@ -0,0 +1,13 @@
+# DP: Fix bashism in makesetup shell script
+
+--- a/Modules/makesetup
++++ b/Modules/makesetup
+@@ -281,7 +281,7 @@
+ -) ;;
+ *) sedf="@sed.in.$$"
+ trap 'rm -f $sedf' 0 1 2 3
+- echo "1i\\" >$sedf
++ printf "1i\\" >$sedf
+ str="# Generated automatically from $makepre by makesetup."
+ echo "$str" >>$sedf
+ echo "s%_MODOBJS_%$OBJS%" >>$sedf
diff --git a/examples/python2.7/patches/multiprocessing-typos.diff b/examples/python2.7/patches/multiprocessing-typos.diff
new file mode 100644
index 0000000..2ed3ba8
--- /dev/null
+++ b/examples/python2.7/patches/multiprocessing-typos.diff
@@ -0,0 +1,24 @@
+# DP: Fix typos in the multiprocessing module.
+
+--- a/Modules/_multiprocessing/multiprocessing.c
++++ b/Modules/_multiprocessing/multiprocessing.c
+@@ -63,7 +63,7 @@
+ break;
+ default:
+ PyErr_Format(PyExc_RuntimeError,
+- "unkown error number %d", num);
++ "unknown error number %d", num);
+ }
+ return NULL;
+ }
+--- a/Lib/multiprocessing/synchronize.py
++++ b/Lib/multiprocessing/synchronize.py
+@@ -226,7 +226,7 @@
+ num_waiters = (self._sleeping_count._semlock._get_value() -
+ self._woken_count._semlock._get_value())
+ except Exception:
+- num_waiters = 'unkown'
++ num_waiters = 'unknown'
+ return '<Condition(%s, %s)>' % (self._lock, num_waiters)
+
+ def wait(self, timeout=None):
diff --git a/examples/python2.7/patches/ncursesw-incdir.diff b/examples/python2.7/patches/ncursesw-incdir.diff
new file mode 100644
index 0000000..1ce0b57
--- /dev/null
+++ b/examples/python2.7/patches/ncursesw-incdir.diff
@@ -0,0 +1,66 @@
+# DP: Use the correct include directory when linking with ncursesw.
+
+--- a/setup.py
++++ b/setup.py
+@@ -1251,13 +1251,17 @@
+ # Curses support, requiring the System V version of curses, often
+ # provided by the ncurses library.
+ panel_library = 'panel'
++ curses_incs = None
+ if curses_library.startswith('ncurses'):
+ if curses_library == 'ncursesw':
+ # Bug 1464056: If _curses.so links with ncursesw,
+ # _curses_panel.so must link with panelw.
+ panel_library = 'panelw'
+ curses_libs = [curses_library]
++ curses_incs = find_file('curses.h', inc_dirs,
++ [os.path.join(d, 'ncursesw') for d in inc_dirs])
+ exts.append( Extension('_curses', ['_cursesmodule.c'],
++ include_dirs = curses_incs,
+ libraries = curses_libs) )
+ elif curses_library == 'curses' and platform != 'darwin':
+ # OSX has an old Berkeley curses, not good enough for
+@@ -1278,6 +1282,7 @@
+ if (module_enabled(exts, '_curses') and
+ self.compiler.find_library_file(lib_dirs, panel_library)):
+ exts.append( Extension('_curses_panel', ['_curses_panel.c'],
++ include_dirs = curses_incs,
+ libraries = [panel_library] + curses_libs) )
+ else:
+ missing.append('_curses_panel')
+--- a/configure.ac
++++ b/configure.ac
+@@ -1374,6 +1374,8 @@
+
+ # checks for header files
+ AC_HEADER_STDC
++ac_save_cppflags="$CPPFLAGS"
++CPPFLAGS="$CPPFLAGS -I/usr/include/ncursesw"
+ AC_CHECK_HEADERS(asm/types.h conio.h curses.h direct.h dlfcn.h errno.h \
+ fcntl.h grp.h \
+ ieeefp.h io.h langinfo.h libintl.h ncurses.h poll.h process.h pthread.h \
+@@ -1395,6 +1397,7 @@
+ #include <curses.h>
+ #endif
+ ])
++CPPFLAGS=$ac_save_cppflags
+
+ # On Linux, netlink.h requires asm/types.h
+ AC_CHECK_HEADERS(linux/netlink.h,,,[
+@@ -4123,6 +4126,8 @@
+ [Define if you have struct stat.st_mtimensec])
+ fi
+
++ac_save_cppflags="$CPPFLAGS"
++CPPFLAGS="$CPPFLAGS -I/usr/include/ncursesw"
+ # On HP/UX 11.0, mvwdelch is a block with a return statement
+ AC_MSG_CHECKING(whether mvwdelch is an expression)
+ AC_CACHE_VAL(ac_cv_mvwdelch_is_expression,
+@@ -4177,6 +4182,7 @@
+ AC_MSG_RESULT(yes)],
+ [AC_MSG_RESULT(no)]
+ )
++CPPFLAGS=$ac_save_cppflags
+
+ AC_MSG_CHECKING(for /dev/ptmx)
+
diff --git a/examples/python2.7/patches/no-zip-on-sys.path.diff b/examples/python2.7/patches/no-zip-on-sys.path.diff
new file mode 100644
index 0000000..74ce88f
--- /dev/null
+++ b/examples/python2.7/patches/no-zip-on-sys.path.diff
@@ -0,0 +1,52 @@
+# DP: Do not add /usr/lib/pythonXY.zip on sys.path.
+
+--- a/Modules/getpath.c
++++ b/Modules/getpath.c
+@@ -380,7 +380,9 @@
+ char *path = getenv("PATH");
+ char *prog = Py_GetProgramName();
+ char argv0_path[MAXPATHLEN+1];
++#ifdef WITH_ZIP_PATH
+ char zip_path[MAXPATHLEN+1];
++#endif
+ int pfound, efound; /* 1 if found; -1 if found build directory */
+ char *buf;
+ size_t bufsz;
+@@ -520,6 +522,7 @@
+ else
+ reduce(prefix);
+
++#ifdef WITH_ZIP_PATH
+ strncpy(zip_path, prefix, MAXPATHLEN);
+ zip_path[MAXPATHLEN] = '\0';
+ if (pfound > 0) { /* Use the reduced prefix returned by Py_GetPrefix() */
+@@ -532,6 +535,7 @@
+ bufsz = strlen(zip_path); /* Replace "00" with version */
+ zip_path[bufsz - 6] = VERSION[0];
+ zip_path[bufsz - 5] = VERSION[2];
++#endif
+
+ if (!(efound = search_for_exec_prefix(argv0_path, home))) {
+ if (!Py_FrozenFlag)
+@@ -571,7 +575,9 @@
+ defpath = delim + 1;
+ }
+
++#ifdef WITH_ZIP_PATH
+ bufsz += strlen(zip_path) + 1;
++#endif
+ bufsz += strlen(exec_prefix) + 1;
+
+ /* This is the only malloc call in this file */
+@@ -592,9 +598,11 @@
+ else
+ buf[0] = '\0';
+
++#ifdef WITH_ZIP_PATH
+ /* Next is the default zip path */
+ strcat(buf, zip_path);
+ strcat(buf, delimiter);
++#endif
+
+ /* Next goes merge of compile-time $PYTHONPATH with
+ * dynamically located prefix.
diff --git a/examples/python2.7/patches/plat-gnukfreebsd.diff b/examples/python2.7/patches/plat-gnukfreebsd.diff
new file mode 100644
index 0000000..77b06a0
--- /dev/null
+++ b/examples/python2.7/patches/plat-gnukfreebsd.diff
@@ -0,0 +1,2478 @@
+--- /dev/null
++++ b/Lib/plat-gnukfreebsd7/IN.py
+@@ -0,0 +1,809 @@
++# Generated by h2py from /usr/include/netinet/in.h
++_NETINET_IN_H = 1
++
++# Included from features.h
++_FEATURES_H = 1
++__USE_ANSI = 1
++__FAVOR_BSD = 1
++_ISOC99_SOURCE = 1
++_POSIX_SOURCE = 1
++_POSIX_C_SOURCE = 200809L
++_XOPEN_SOURCE = 700
++_XOPEN_SOURCE_EXTENDED = 1
++_LARGEFILE64_SOURCE = 1
++_BSD_SOURCE = 1
++_SVID_SOURCE = 1
++_ATFILE_SOURCE = 1
++_BSD_SOURCE = 1
++_SVID_SOURCE = 1
++__USE_ISOC99 = 1
++__USE_ISOC95 = 1
++_POSIX_SOURCE = 1
++_POSIX_C_SOURCE = 2
++_POSIX_C_SOURCE = 199506L
++_POSIX_C_SOURCE = 200112L
++_POSIX_C_SOURCE = 200809L
++__USE_POSIX_IMPLICITLY = 1
++__USE_POSIX = 1
++__USE_POSIX2 = 1
++__USE_POSIX199309 = 1
++__USE_POSIX199506 = 1
++__USE_XOPEN2K = 1
++__USE_ISOC99 = 1
++__USE_XOPEN2K8 = 1
++_ATFILE_SOURCE = 1
++__USE_XOPEN = 1
++__USE_XOPEN_EXTENDED = 1
++__USE_UNIX98 = 1
++_LARGEFILE_SOURCE = 1
++__USE_XOPEN2K8 = 1
++__USE_XOPEN2K = 1
++__USE_ISOC99 = 1
++__USE_XOPEN_EXTENDED = 1
++__USE_LARGEFILE = 1
++__USE_LARGEFILE64 = 1
++__USE_FILE_OFFSET64 = 1
++__USE_MISC = 1
++__USE_BSD = 1
++__USE_SVID = 1
++__USE_ATFILE = 1
++__USE_GNU = 1
++__USE_REENTRANT = 1
++__USE_FORTIFY_LEVEL = 2
++__USE_FORTIFY_LEVEL = 1
++__USE_FORTIFY_LEVEL = 0
++
++# Included from bits/predefs.h
++__STDC_IEC_559__ = 1
++__STDC_IEC_559_COMPLEX__ = 1
++__STDC_ISO_10646__ = 200009L
++__GNU_LIBRARY__ = 6
++__GLIBC__ = 2
++__GLIBC_MINOR__ = 11
++__GLIBC_HAVE_LONG_LONG = 1
++
++# Included from sys/cdefs.h
++_SYS_CDEFS_H = 1
++def __NTH(fct): return fct
++
++def __NTH(fct): return fct
++
++def __P(args): return args
++
++def __PMT(args): return args
++
++def __STRING(x): return #x
++
++def __bos(ptr): return __builtin_object_size (ptr, __USE_FORTIFY_LEVEL > 1)
++
++def __bos0(ptr): return __builtin_object_size (ptr, 0)
++
++def __warnattr(msg): return __attribute__((__warning__ (msg)))
++
++__flexarr = []
++__flexarr = [0]
++__flexarr = []
++__flexarr = [1]
++def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
++
++def __attribute__(xyz): return
++
++def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
++
++def __attribute_format_arg__(x): return
++
++
++# Included from bits/wordsize.h
++__WORDSIZE = 32
++__LDBL_COMPAT = 1
++def __LDBL_REDIR_DECL(name): return \
++
++__USE_LARGEFILE = 1
++__USE_LARGEFILE64 = 1
++__USE_EXTERN_INLINES = 1
++__USE_EXTERN_INLINES_IN_LIBC = 1
++
++# Included from gnu/stubs.h
++
++# Included from stdint.h
++_STDINT_H = 1
++
++# Included from bits/wchar.h
++_BITS_WCHAR_H = 1
++__WCHAR_MAX = (2147483647)
++__WCHAR_MIN = (-__WCHAR_MAX - 1)
++def __INT64_C(c): return c ## L
++
++def __UINT64_C(c): return c ## UL
++
++def __INT64_C(c): return c ## LL
++
++def __UINT64_C(c): return c ## ULL
++
++INT8_MIN = (-128)
++INT16_MIN = (-32767-1)
++INT32_MIN = (-2147483647-1)
++INT64_MIN = (-__INT64_C(9223372036854775807)-1)
++INT8_MAX = (127)
++INT16_MAX = (32767)
++INT32_MAX = (2147483647)
++INT64_MAX = (__INT64_C(9223372036854775807))
++UINT8_MAX = (255)
++UINT16_MAX = (65535)
++UINT64_MAX = (__UINT64_C(18446744073709551615))
++INT_LEAST8_MIN = (-128)
++INT_LEAST16_MIN = (-32767-1)
++INT_LEAST32_MIN = (-2147483647-1)
++INT_LEAST64_MIN = (-__INT64_C(9223372036854775807)-1)
++INT_LEAST8_MAX = (127)
++INT_LEAST16_MAX = (32767)
++INT_LEAST32_MAX = (2147483647)
++INT_LEAST64_MAX = (__INT64_C(9223372036854775807))
++UINT_LEAST8_MAX = (255)
++UINT_LEAST16_MAX = (65535)
++UINT_LEAST64_MAX = (__UINT64_C(18446744073709551615))
++INT_FAST8_MIN = (-128)
++INT_FAST16_MIN = (-9223372036854775807L-1)
++INT_FAST32_MIN = (-9223372036854775807L-1)
++INT_FAST16_MIN = (-2147483647-1)
++INT_FAST32_MIN = (-2147483647-1)
++INT_FAST64_MIN = (-__INT64_C(9223372036854775807)-1)
++INT_FAST8_MAX = (127)
++INT_FAST16_MAX = (9223372036854775807L)
++INT_FAST32_MAX = (9223372036854775807L)
++INT_FAST16_MAX = (2147483647)
++INT_FAST32_MAX = (2147483647)
++INT_FAST64_MAX = (__INT64_C(9223372036854775807))
++UINT_FAST8_MAX = (255)
++UINT_FAST64_MAX = (__UINT64_C(18446744073709551615))
++INTPTR_MIN = (-9223372036854775807L-1)
++INTPTR_MAX = (9223372036854775807L)
++INTPTR_MIN = (-2147483647-1)
++INTPTR_MAX = (2147483647)
++INTMAX_MIN = (-__INT64_C(9223372036854775807)-1)
++INTMAX_MAX = (__INT64_C(9223372036854775807))
++UINTMAX_MAX = (__UINT64_C(18446744073709551615))
++PTRDIFF_MIN = (-9223372036854775807L-1)
++PTRDIFF_MAX = (9223372036854775807L)
++PTRDIFF_MIN = (-2147483647-1)
++PTRDIFF_MAX = (2147483647)
++SIG_ATOMIC_MIN = (-2147483647-1)
++SIG_ATOMIC_MAX = (2147483647)
++WCHAR_MIN = __WCHAR_MIN
++WCHAR_MAX = __WCHAR_MAX
++def INT8_C(c): return c
++
++def INT16_C(c): return c
++
++def INT32_C(c): return c
++
++def INT64_C(c): return c ## L
++
++def INT64_C(c): return c ## LL
++
++def UINT8_C(c): return c
++
++def UINT16_C(c): return c
++
++def UINT32_C(c): return c ## U
++
++def UINT64_C(c): return c ## UL
++
++def UINT64_C(c): return c ## ULL
++
++def INTMAX_C(c): return c ## L
++
++def UINTMAX_C(c): return c ## UL
++
++def INTMAX_C(c): return c ## LL
++
++def UINTMAX_C(c): return c ## ULL
++
++
++# Included from sys/socket.h
++_SYS_SOCKET_H = 1
++
++# Included from sys/uio.h
++_SYS_UIO_H = 1
++from TYPES import *
++
++# Included from bits/uio.h
++_BITS_UIO_H = 1
++from TYPES import *
++UIO_MAXIOV = 1024
++
++# Included from bits/sigset.h
++_SIGSET_H_types = 1
++_SIGSET_H_fns = 1
++def __sigword(sig): return (((sig) - 1) >> 5)
++
++def __sigemptyset(set): return \
++
++def __sigfillset(set): return \
++
++def __sigisemptyset(set): return \
++
++
++# Included from bits/socket.h
++__BITS_SOCKET_H = 1
++
++# Included from limits.h
++_LIBC_LIMITS_H_ = 1
++MB_LEN_MAX = 16
++_LIMITS_H = 1
++CHAR_BIT = 8
++SCHAR_MIN = (-128)
++SCHAR_MAX = 127
++UCHAR_MAX = 255
++CHAR_MIN = 0
++CHAR_MAX = UCHAR_MAX
++CHAR_MIN = SCHAR_MIN
++CHAR_MAX = SCHAR_MAX
++SHRT_MIN = (-32768)
++SHRT_MAX = 32767
++USHRT_MAX = 65535
++INT_MAX = 2147483647
++LONG_MAX = 9223372036854775807L
++LONG_MAX = 2147483647L
++LONG_MIN = (-LONG_MAX - 1L)
++
++# Included from bits/posix1_lim.h
++_BITS_POSIX1_LIM_H = 1
++_POSIX_AIO_LISTIO_MAX = 2
++_POSIX_AIO_MAX = 1
++_POSIX_ARG_MAX = 4096
++_POSIX_CHILD_MAX = 25
++_POSIX_CHILD_MAX = 6
++_POSIX_DELAYTIMER_MAX = 32
++_POSIX_HOST_NAME_MAX = 255
++_POSIX_LINK_MAX = 8
++_POSIX_LOGIN_NAME_MAX = 9
++_POSIX_MAX_CANON = 255
++_POSIX_MAX_INPUT = 255
++_POSIX_MQ_OPEN_MAX = 8
++_POSIX_MQ_PRIO_MAX = 32
++_POSIX_NAME_MAX = 14
++_POSIX_NGROUPS_MAX = 8
++_POSIX_NGROUPS_MAX = 0
++_POSIX_OPEN_MAX = 20
++_POSIX_OPEN_MAX = 16
++_POSIX_FD_SETSIZE = _POSIX_OPEN_MAX
++_POSIX_PATH_MAX = 256
++_POSIX_PIPE_BUF = 512
++_POSIX_RE_DUP_MAX = 255
++_POSIX_RTSIG_MAX = 8
++_POSIX_SEM_NSEMS_MAX = 256
++_POSIX_SEM_VALUE_MAX = 32767
++_POSIX_SIGQUEUE_MAX = 32
++_POSIX_SSIZE_MAX = 32767
++_POSIX_STREAM_MAX = 8
++_POSIX_SYMLINK_MAX = 255
++_POSIX_SYMLOOP_MAX = 8
++_POSIX_TIMER_MAX = 32
++_POSIX_TTY_NAME_MAX = 9
++_POSIX_TZNAME_MAX = 6
++_POSIX_QLIMIT = 1
++_POSIX_HIWAT = _POSIX_PIPE_BUF
++_POSIX_UIO_MAXIOV = 16
++_POSIX_CLOCKRES_MIN = 20000000
++
++# Included from bits/local_lim.h
++
++# Included from sys/syslimits.h
++ARG_MAX = 262144
++CHILD_MAX = 40
++LINK_MAX = 32767
++MAX_CANON = 255
++MAX_INPUT = 255
++NAME_MAX = 255
++NGROUPS_MAX = 1023
++OPEN_MAX = 64
++PATH_MAX = 1024
++PIPE_BUF = 512
++IOV_MAX = 1024
++_POSIX_THREAD_KEYS_MAX = 128
++PTHREAD_KEYS_MAX = 1024
++_POSIX_THREAD_DESTRUCTOR_ITERATIONS = 4
++PTHREAD_DESTRUCTOR_ITERATIONS = _POSIX_THREAD_DESTRUCTOR_ITERATIONS
++_POSIX_THREAD_THREADS_MAX = 64
++PTHREAD_THREADS_MAX = 1024
++AIO_PRIO_DELTA_MAX = 20
++PTHREAD_STACK_MIN = 16384
++TIMER_MAX = 256
++DELAYTIMER_MAX = 2147483647
++SSIZE_MAX = LONG_MAX
++NGROUPS_MAX = 8
++
++# Included from bits/posix2_lim.h
++_BITS_POSIX2_LIM_H = 1
++_POSIX2_BC_BASE_MAX = 99
++_POSIX2_BC_DIM_MAX = 2048
++_POSIX2_BC_SCALE_MAX = 99
++_POSIX2_BC_STRING_MAX = 1000
++_POSIX2_COLL_WEIGHTS_MAX = 2
++_POSIX2_EXPR_NEST_MAX = 32
++_POSIX2_LINE_MAX = 2048
++_POSIX2_RE_DUP_MAX = 255
++_POSIX2_CHARCLASS_NAME_MAX = 14
++BC_BASE_MAX = _POSIX2_BC_BASE_MAX
++BC_DIM_MAX = _POSIX2_BC_DIM_MAX
++BC_SCALE_MAX = _POSIX2_BC_SCALE_MAX
++BC_STRING_MAX = _POSIX2_BC_STRING_MAX
++COLL_WEIGHTS_MAX = 255
++EXPR_NEST_MAX = _POSIX2_EXPR_NEST_MAX
++LINE_MAX = _POSIX2_LINE_MAX
++CHARCLASS_NAME_MAX = 2048
++RE_DUP_MAX = (0x7fff)
++
++# Included from bits/xopen_lim.h
++_XOPEN_LIM_H = 1
++
++# Included from bits/stdio_lim.h
++L_tmpnam = 20
++TMP_MAX = 238328
++FILENAME_MAX = 1024
++L_ctermid = 9
++L_cuserid = 9
++FOPEN_MAX = 64
++IOV_MAX = 1024
++_XOPEN_IOV_MAX = _POSIX_UIO_MAXIOV
++NL_ARGMAX = _POSIX_ARG_MAX
++NL_LANGMAX = _POSIX2_LINE_MAX
++NL_MSGMAX = INT_MAX
++NL_NMAX = INT_MAX
++NL_SETMAX = INT_MAX
++NL_TEXTMAX = INT_MAX
++NZERO = 20
++WORD_BIT = 16
++WORD_BIT = 32
++WORD_BIT = 64
++WORD_BIT = 16
++WORD_BIT = 32
++WORD_BIT = 64
++WORD_BIT = 32
++LONG_BIT = 32
++LONG_BIT = 64
++LONG_BIT = 32
++LONG_BIT = 64
++LONG_BIT = 64
++LONG_BIT = 32
++
++# Included from bits/types.h
++_BITS_TYPES_H = 1
++__S32_TYPE = int
++__SWORD_TYPE = int
++__SLONG32_TYPE = int
++
++# Included from bits/typesizes.h
++_BITS_TYPESIZES_H = 1
++__PID_T_TYPE = __S32_TYPE
++__CLOCK_T_TYPE = __S32_TYPE
++__SWBLK_T_TYPE = __S32_TYPE
++__CLOCKID_T_TYPE = __S32_TYPE
++__TIMER_T_TYPE = __S32_TYPE
++__SSIZE_T_TYPE = __SWORD_TYPE
++__FD_SETSIZE = 1024
++PF_UNSPEC = 0
++PF_LOCAL = 1
++PF_UNIX = PF_LOCAL
++PF_FILE = PF_LOCAL
++PF_INET = 2
++PF_IMPLINK = 3
++PF_PUP = 4
++PF_CHAOS = 5
++PF_NS = 6
++PF_ISO = 7
++PF_OSI = PF_ISO
++PF_ECMA = 8
++PF_DATAKIT = 9
++PF_CCITT = 10
++PF_SNA = 11
++PF_DECnet = 12
++PF_DLI = 13
++PF_LAT = 14
++PF_HYLINK = 15
++PF_APPLETALK = 16
++PF_ROUTE = 17
++PF_LINK = 18
++PF_XTP = 19
++PF_COIP = 20
++PF_CNT = 21
++PF_RTIP = 22
++PF_IPX = 23
++PF_SIP = 24
++PF_PIP = 25
++PF_ISDN = 26
++PF_KEY = 27
++PF_INET6 = 28
++PF_NATM = 29
++PF_ATM = 30
++PF_HDRCMPLT = 31
++PF_NETGRAPH = 32
++PF_MAX = 33
++AF_UNSPEC = PF_UNSPEC
++AF_LOCAL = PF_LOCAL
++AF_UNIX = PF_UNIX
++AF_FILE = PF_FILE
++AF_INET = PF_INET
++AF_IMPLINK = PF_IMPLINK
++AF_PUP = PF_PUP
++AF_CHAOS = PF_CHAOS
++AF_NS = PF_NS
++AF_ISO = PF_ISO
++AF_OSI = PF_OSI
++AF_ECMA = PF_ECMA
++AF_DATAKIT = PF_DATAKIT
++AF_CCITT = PF_CCITT
++AF_SNA = PF_SNA
++AF_DECnet = PF_DECnet
++AF_DLI = PF_DLI
++AF_LAT = PF_LAT
++AF_HYLINK = PF_HYLINK
++AF_APPLETALK = PF_APPLETALK
++AF_ROUTE = PF_ROUTE
++AF_LINK = PF_LINK
++pseudo_AF_XTP = PF_XTP
++AF_COIP = PF_COIP
++AF_CNT = PF_CNT
++pseudo_AF_RTIP = PF_RTIP
++AF_IPX = PF_IPX
++AF_SIP = PF_SIP
++pseudo_AF_PIP = PF_PIP
++AF_ISDN = PF_ISDN
++AF_E164 = AF_ISDN
++pseudo_AF_KEY = PF_KEY
++AF_INET6 = PF_INET6
++AF_NATM = PF_NATM
++AF_ATM = PF_ATM
++pseudo_AF_HDRCMPLT = PF_HDRCMPLT
++AF_NETGRAPH = PF_NETGRAPH
++AF_MAX = PF_MAX
++SOMAXCONN = 128
++
++# Included from bits/sockaddr.h
++_BITS_SOCKADDR_H = 1
++def __SOCKADDR_COMMON(sa_prefix): return \
++
++_HAVE_SA_LEN = 1
++_SS_SIZE = 128
++def CMSG_FIRSTHDR(mhdr): return \
++
++CMGROUP_MAX = 16
++SOL_SOCKET = 0xffff
++LOCAL_PEERCRED = 0x001
++LOCAL_CREDS = 0x002
++LOCAL_CONNWAIT = 0x004
++
++# Included from bits/socket2.h
++def IN_CLASSA(a): return ((((in_addr_t)(a)) & (-2147483648)) == 0)
++
++IN_CLASSA_NET = (-16777216)
++IN_CLASSA_NSHIFT = 24
++IN_CLASSA_HOST = ((-1) & ~IN_CLASSA_NET)
++IN_CLASSA_MAX = 128
++def IN_CLASSB(a): return ((((in_addr_t)(a)) & (-1073741824)) == (-2147483648))
++
++IN_CLASSB_NET = (-65536)
++IN_CLASSB_NSHIFT = 16
++IN_CLASSB_HOST = ((-1) & ~IN_CLASSB_NET)
++IN_CLASSB_MAX = 65536
++def IN_CLASSC(a): return ((((in_addr_t)(a)) & (-536870912)) == (-1073741824))
++
++IN_CLASSC_NET = (-256)
++IN_CLASSC_NSHIFT = 8
++IN_CLASSC_HOST = ((-1) & ~IN_CLASSC_NET)
++def IN_CLASSD(a): return ((((in_addr_t)(a)) & (-268435456)) == (-536870912))
++
++def IN_MULTICAST(a): return IN_CLASSD(a)
++
++def IN_EXPERIMENTAL(a): return ((((in_addr_t)(a)) & (-536870912)) == (-536870912))
++
++def IN_BADCLASS(a): return ((((in_addr_t)(a)) & (-268435456)) == (-268435456))
++
++IN_LOOPBACKNET = 127
++INET_ADDRSTRLEN = 16
++INET6_ADDRSTRLEN = 46
++
++# Included from bits/in.h
++IMPLINK_IP = 155
++IMPLINK_LOWEXPER = 156
++IMPLINK_HIGHEXPER = 158
++IPPROTO_DIVERT = 258
++SOL_IP = 0
++IP_OPTIONS = 1
++IP_HDRINCL = 2
++IP_TOS = 3
++IP_TTL = 4
++IP_RECVOPTS = 5
++IP_RECVRETOPTS = 6
++IP_RECVDSTADDR = 7
++IP_SENDSRCADDR = IP_RECVDSTADDR
++IP_RETOPTS = 8
++IP_MULTICAST_IF = 9
++IP_MULTICAST_TTL = 10
++IP_MULTICAST_LOOP = 11
++IP_ADD_MEMBERSHIP = 12
++IP_DROP_MEMBERSHIP = 13
++IP_MULTICAST_VIF = 14
++IP_RSVP_ON = 15
++IP_RSVP_OFF = 16
++IP_RSVP_VIF_ON = 17
++IP_RSVP_VIF_OFF = 18
++IP_PORTRANGE = 19
++IP_RECVIF = 20
++IP_IPSEC_POLICY = 21
++IP_FAITH = 22
++IP_ONESBCAST = 23
++IP_NONLOCALOK = 24
++IP_FW_TABLE_ADD = 40
++IP_FW_TABLE_DEL = 41
++IP_FW_TABLE_FLUSH = 42
++IP_FW_TABLE_GETSIZE = 43
++IP_FW_TABLE_LIST = 44
++IP_FW_ADD = 50
++IP_FW_DEL = 51
++IP_FW_FLUSH = 52
++IP_FW_ZERO = 53
++IP_FW_GET = 54
++IP_FW_RESETLOG = 55
++IP_FW_NAT_CFG = 56
++IP_FW_NAT_DEL = 57
++IP_FW_NAT_GET_CONFIG = 58
++IP_FW_NAT_GET_LOG = 59
++IP_DUMMYNET_CONFIGURE = 60
++IP_DUMMYNET_DEL = 61
++IP_DUMMYNET_FLUSH = 62
++IP_DUMMYNET_GET = 64
++IP_RECVTTL = 65
++IP_MINTTL = 66
++IP_DONTFRAG = 67
++IP_ADD_SOURCE_MEMBERSHIP = 70
++IP_DROP_SOURCE_MEMBERSHIP = 71
++IP_BLOCK_SOURCE = 72
++IP_UNBLOCK_SOURCE = 73
++IP_MSFILTER = 74
++MCAST_JOIN_GROUP = 80
++MCAST_LEAVE_GROUP = 81
++MCAST_JOIN_SOURCE_GROUP = 82
++MCAST_LEAVE_SOURCE_GROUP = 83
++MCAST_BLOCK_SOURCE = 84
++MCAST_UNBLOCK_SOURCE = 85
++IP_DEFAULT_MULTICAST_TTL = 1
++IP_DEFAULT_MULTICAST_LOOP = 1
++IP_MIN_MEMBERSHIPS = 31
++IP_MAX_MEMBERSHIPS = 4095
++IP_MAX_SOURCE_FILTER = 1024
++MCAST_UNDEFINED = 0
++MCAST_INCLUDE = 1
++MCAST_EXCLUDE = 2
++IP_PORTRANGE_DEFAULT = 0
++IP_PORTRANGE_HIGH = 1
++IP_PORTRANGE_LOW = 2
++IPCTL_FORWARDING = 1
++IPCTL_SENDREDIRECTS = 2
++IPCTL_DEFTTL = 3
++IPCTL_DEFMTU = 4
++IPCTL_RTEXPIRE = 5
++IPCTL_RTMINEXPIRE = 6
++IPCTL_RTMAXCACHE = 7
++IPCTL_SOURCEROUTE = 8
++IPCTL_DIRECTEDBROADCAST = 9
++IPCTL_INTRQMAXLEN = 10
++IPCTL_INTRQDROPS = 11
++IPCTL_STATS = 12
++IPCTL_ACCEPTSOURCEROUTE = 13
++IPCTL_FASTFORWARDING = 14
++IPCTL_KEEPFAITH = 15
++IPCTL_GIF_TTL = 16
++IPCTL_MAXID = 17
++IPV6_SOCKOPT_RESERVED1 = 3
++IPV6_UNICAST_HOPS = 4
++IPV6_MULTICAST_IF = 9
++IPV6_MULTICAST_HOPS = 10
++IPV6_MULTICAST_LOOP = 11
++IPV6_JOIN_GROUP = 12
++IPV6_LEAVE_GROUP = 13
++IPV6_PORTRANGE = 14
++ICMP6_FILTER = 18
++IPV6_CHECKSUM = 26
++IPV6_V6ONLY = 27
++IPV6_IPSEC_POLICY = 28
++IPV6_FAITH = 29
++IPV6_FW_ADD = 30
++IPV6_FW_DEL = 31
++IPV6_FW_FLUSH = 32
++IPV6_FW_ZERO = 33
++IPV6_FW_GET = 34
++IPV6_RTHDRDSTOPTS = 35
++IPV6_RECVPKTINFO = 36
++IPV6_RECVHOPLIMIT = 37
++IPV6_RECVRTHDR = 38
++IPV6_RECVHOPOPTS = 39
++IPV6_RECVDSTOPTS = 40
++IPV6_USE_MIN_MTU = 42
++IPV6_RECVPATHMTU = 43
++IPV6_PATHMTU = 44
++IPV6_PKTINFO = 46
++IPV6_HOPLIMIT = 47
++IPV6_NEXTHOP = 48
++IPV6_HOPOPTS = 49
++IPV6_DSTOPTS = 50
++IPV6_RTHDR = 51
++IPV6_RECVTCLASS = 57
++IPV6_AUTOFLOWLABEL = 59
++IPV6_TCLASS = 61
++IPV6_DONTFRAG = 62
++IPV6_PREFER_TEMPADDR = 63
++IPV6_ADD_MEMBERSHIP = IPV6_JOIN_GROUP
++IPV6_DROP_MEMBERSHIP = IPV6_LEAVE_GROUP
++IPV6_RXHOPOPTS = IPV6_HOPOPTS
++IPV6_RXDSTOPTS = IPV6_DSTOPTS
++SOL_IPV6 = 41
++SOL_ICMPV6 = 58
++IPV6_DEFAULT_MULTICAST_HOPS = 1
++IPV6_DEFAULT_MULTICAST_LOOP = 1
++IPV6_PORTRANGE_DEFAULT = 0
++IPV6_PORTRANGE_HIGH = 1
++IPV6_PORTRANGE_LOW = 2
++IPV6_RTHDR_LOOSE = 0
++IPV6_RTHDR_STRICT = 1
++IPV6_RTHDR_TYPE_0 = 0
++IPV6CTL_FORWARDING = 1
++IPV6CTL_SENDREDIRECTS = 2
++IPV6CTL_DEFHLIM = 3
++IPV6CTL_FORWSRCRT = 5
++IPV6CTL_STATS = 6
++IPV6CTL_MRTSTATS = 7
++IPV6CTL_MRTPROTO = 8
++IPV6CTL_MAXFRAGPACKETS = 9
++IPV6CTL_SOURCECHECK = 10
++IPV6CTL_SOURCECHECK_LOGINT = 11
++IPV6CTL_ACCEPT_RTADV = 12
++IPV6CTL_KEEPFAITH = 13
++IPV6CTL_LOG_INTERVAL = 14
++IPV6CTL_HDRNESTLIMIT = 15
++IPV6CTL_DAD_COUNT = 16
++IPV6CTL_AUTO_FLOWLABEL = 17
++IPV6CTL_DEFMCASTHLIM = 18
++IPV6CTL_GIF_HLIM = 19
++IPV6CTL_KAME_VERSION = 20
++IPV6CTL_USE_DEPRECATED = 21
++IPV6CTL_RR_PRUNE = 22
++IPV6CTL_V6ONLY = 24
++IPV6CTL_RTEXPIRE = 25
++IPV6CTL_RTMINEXPIRE = 26
++IPV6CTL_RTMAXCACHE = 27
++IPV6CTL_USETEMPADDR = 32
++IPV6CTL_TEMPPLTIME = 33
++IPV6CTL_TEMPVLTIME = 34
++IPV6CTL_AUTO_LINKLOCAL = 35
++IPV6CTL_RIP6STATS = 36
++IPV6CTL_PREFER_TEMPADDR = 37
++IPV6CTL_ADDRCTLPOLICY = 38
++IPV6CTL_USE_DEFAULTZONE = 39
++IPV6CTL_MAXFRAGS = 41
++IPV6CTL_MCAST_PMTU = 44
++IPV6CTL_STEALTH = 45
++ICMPV6CTL_ND6_ONLINKNSRFC4861 = 47
++IPV6CTL_MAXID = 48
++
++# Included from endian.h
++_ENDIAN_H = 1
++__LITTLE_ENDIAN = 1234
++__BIG_ENDIAN = 4321
++__PDP_ENDIAN = 3412
++
++# Included from bits/endian.h
++__BYTE_ORDER = __LITTLE_ENDIAN
++__FLOAT_WORD_ORDER = __BYTE_ORDER
++LITTLE_ENDIAN = __LITTLE_ENDIAN
++BIG_ENDIAN = __BIG_ENDIAN
++PDP_ENDIAN = __PDP_ENDIAN
++BYTE_ORDER = __BYTE_ORDER
++
++# Included from bits/byteswap.h
++_BITS_BYTESWAP_H = 1
++def __bswap_constant_16(x): return \
++
++def __bswap_16(x): return \
++
++def __bswap_16(x): return \
++
++def __bswap_constant_32(x): return \
++
++def __bswap_32(x): return \
++
++def __bswap_32(x): return \
++
++def __bswap_32(x): return \
++
++def __bswap_constant_64(x): return \
++
++def __bswap_64(x): return \
++
++def htobe16(x): return __bswap_16 (x)
++
++def htole16(x): return (x)
++
++def be16toh(x): return __bswap_16 (x)
++
++def le16toh(x): return (x)
++
++def htobe32(x): return __bswap_32 (x)
++
++def htole32(x): return (x)
++
++def be32toh(x): return __bswap_32 (x)
++
++def le32toh(x): return (x)
++
++def htobe64(x): return __bswap_64 (x)
++
++def htole64(x): return (x)
++
++def be64toh(x): return __bswap_64 (x)
++
++def le64toh(x): return (x)
++
++def htobe16(x): return (x)
++
++def htole16(x): return __bswap_16 (x)
++
++def be16toh(x): return (x)
++
++def le16toh(x): return __bswap_16 (x)
++
++def htobe32(x): return (x)
++
++def htole32(x): return __bswap_32 (x)
++
++def be32toh(x): return (x)
++
++def le32toh(x): return __bswap_32 (x)
++
++def htobe64(x): return (x)
++
++def htole64(x): return __bswap_64 (x)
++
++def be64toh(x): return (x)
++
++def le64toh(x): return __bswap_64 (x)
++
++def ntohl(x): return (x)
++
++def ntohs(x): return (x)
++
++def htonl(x): return (x)
++
++def htons(x): return (x)
++
++def ntohl(x): return __bswap_32 (x)
++
++def ntohs(x): return __bswap_16 (x)
++
++def htonl(x): return __bswap_32 (x)
++
++def htons(x): return __bswap_16 (x)
++
++def IN6_IS_ADDR_UNSPECIFIED(a): return \
++
++def IN6_IS_ADDR_LOOPBACK(a): return \
++
++def IN6_IS_ADDR_LINKLOCAL(a): return \
++
++def IN6_IS_ADDR_SITELOCAL(a): return \
++
++def IN6_IS_ADDR_V4MAPPED(a): return \
++
++def IN6_IS_ADDR_V4COMPAT(a): return \
++
++def IN6_IS_ADDR_MC_NODELOCAL(a): return \
++
++def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
++
++def IN6_IS_ADDR_MC_SITELOCAL(a): return \
++
++def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
++
++def IN6_IS_ADDR_MC_GLOBAL(a): return \
++
+--- /dev/null
++++ b/Lib/plat-gnukfreebsd7/TYPES.py
+@@ -0,0 +1,303 @@
++# Generated by h2py from /usr/include/sys/types.h
++_SYS_TYPES_H = 1
++
++# Included from features.h
++_FEATURES_H = 1
++__USE_ANSI = 1
++__FAVOR_BSD = 1
++_ISOC99_SOURCE = 1
++_POSIX_SOURCE = 1
++_POSIX_C_SOURCE = 200809L
++_XOPEN_SOURCE = 700
++_XOPEN_SOURCE_EXTENDED = 1
++_LARGEFILE64_SOURCE = 1
++_BSD_SOURCE = 1
++_SVID_SOURCE = 1
++_ATFILE_SOURCE = 1
++_BSD_SOURCE = 1
++_SVID_SOURCE = 1
++__USE_ISOC99 = 1
++__USE_ISOC95 = 1
++_POSIX_SOURCE = 1
++_POSIX_C_SOURCE = 2
++_POSIX_C_SOURCE = 199506L
++_POSIX_C_SOURCE = 200112L
++_POSIX_C_SOURCE = 200809L
++__USE_POSIX_IMPLICITLY = 1
++__USE_POSIX = 1
++__USE_POSIX2 = 1
++__USE_POSIX199309 = 1
++__USE_POSIX199506 = 1
++__USE_XOPEN2K = 1
++__USE_ISOC99 = 1
++__USE_XOPEN2K8 = 1
++_ATFILE_SOURCE = 1
++__USE_XOPEN = 1
++__USE_XOPEN_EXTENDED = 1
++__USE_UNIX98 = 1
++_LARGEFILE_SOURCE = 1
++__USE_XOPEN2K8 = 1
++__USE_XOPEN2K = 1
++__USE_ISOC99 = 1
++__USE_XOPEN_EXTENDED = 1
++__USE_LARGEFILE = 1
++__USE_LARGEFILE64 = 1
++__USE_FILE_OFFSET64 = 1
++__USE_MISC = 1
++__USE_BSD = 1
++__USE_SVID = 1
++__USE_ATFILE = 1
++__USE_GNU = 1
++__USE_REENTRANT = 1
++__USE_FORTIFY_LEVEL = 2
++__USE_FORTIFY_LEVEL = 1
++__USE_FORTIFY_LEVEL = 0
++
++# Included from bits/predefs.h
++__STDC_IEC_559__ = 1
++__STDC_IEC_559_COMPLEX__ = 1
++__STDC_ISO_10646__ = 200009L
++__GNU_LIBRARY__ = 6
++__GLIBC__ = 2
++__GLIBC_MINOR__ = 11
++__GLIBC_HAVE_LONG_LONG = 1
++
++# Included from sys/cdefs.h
++_SYS_CDEFS_H = 1
++def __NTH(fct): return fct
++
++def __NTH(fct): return fct
++
++def __P(args): return args
++
++def __PMT(args): return args
++
++def __STRING(x): return #x
++
++def __bos(ptr): return __builtin_object_size (ptr, __USE_FORTIFY_LEVEL > 1)
++
++def __bos0(ptr): return __builtin_object_size (ptr, 0)
++
++def __warnattr(msg): return __attribute__((__warning__ (msg)))
++
++__flexarr = []
++__flexarr = [0]
++__flexarr = []
++__flexarr = [1]
++def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
++
++def __attribute__(xyz): return
++
++def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
++
++def __attribute_format_arg__(x): return
++
++
++# Included from bits/wordsize.h
++__WORDSIZE = 32
++__LDBL_COMPAT = 1
++def __LDBL_REDIR_DECL(name): return \
++
++__USE_LARGEFILE = 1
++__USE_LARGEFILE64 = 1
++__USE_EXTERN_INLINES = 1
++__USE_EXTERN_INLINES_IN_LIBC = 1
++
++# Included from gnu/stubs.h
++
++# Included from bits/types.h
++_BITS_TYPES_H = 1
++__S32_TYPE = int
++__SWORD_TYPE = int
++__SLONG32_TYPE = int
++
++# Included from bits/typesizes.h
++_BITS_TYPESIZES_H = 1
++__PID_T_TYPE = __S32_TYPE
++__CLOCK_T_TYPE = __S32_TYPE
++__SWBLK_T_TYPE = __S32_TYPE
++__CLOCKID_T_TYPE = __S32_TYPE
++__TIMER_T_TYPE = __S32_TYPE
++__SSIZE_T_TYPE = __SWORD_TYPE
++__FD_SETSIZE = 1024
++
++# Included from time.h
++_TIME_H = 1
++
++# Included from bits/time.h
++_BITS_TIME_H = 1
++CLOCKS_PER_SEC = 1000000l
++CLK_TCK = 128
++CLOCK_REALTIME = 0
++CLOCK_PROCESS_CPUTIME_ID = 2
++CLOCK_THREAD_CPUTIME_ID = 3
++CLOCK_MONOTONIC = 4
++CLOCK_VIRTUAL = 1
++CLOCK_PROF = 2
++CLOCK_UPTIME = 5
++CLOCK_UPTIME_PRECISE = 7
++CLOCK_UPTIME_FAST = 8
++CLOCK_REALTIME_PRECISE = 9
++CLOCK_REALTIME_FAST = 10
++CLOCK_MONOTONIC_PRECISE = 11
++CLOCK_MONOTONIC_FAST = 12
++CLOCK_SECOND = 13
++TIMER_RELTIME = 0
++TIMER_ABSTIME = 1
++_STRUCT_TIMEVAL = 1
++CLK_TCK = CLOCKS_PER_SEC
++__clock_t_defined = 1
++__time_t_defined = 1
++__clockid_t_defined = 1
++__timer_t_defined = 1
++__timespec_defined = 1
++
++# Included from xlocale.h
++_XLOCALE_H = 1
++def __isleap(year): return \
++
++__BIT_TYPES_DEFINED__ = 1
++
++# Included from endian.h
++_ENDIAN_H = 1
++__LITTLE_ENDIAN = 1234
++__BIG_ENDIAN = 4321
++__PDP_ENDIAN = 3412
++
++# Included from bits/endian.h
++__BYTE_ORDER = __LITTLE_ENDIAN
++__FLOAT_WORD_ORDER = __BYTE_ORDER
++LITTLE_ENDIAN = __LITTLE_ENDIAN
++BIG_ENDIAN = __BIG_ENDIAN
++PDP_ENDIAN = __PDP_ENDIAN
++BYTE_ORDER = __BYTE_ORDER
++
++# Included from bits/byteswap.h
++_BITS_BYTESWAP_H = 1
++def __bswap_constant_16(x): return \
++
++def __bswap_16(x): return \
++
++def __bswap_16(x): return \
++
++def __bswap_constant_32(x): return \
++
++def __bswap_32(x): return \
++
++def __bswap_32(x): return \
++
++def __bswap_32(x): return \
++
++def __bswap_constant_64(x): return \
++
++def __bswap_64(x): return \
++
++def htobe16(x): return __bswap_16 (x)
++
++def htole16(x): return (x)
++
++def be16toh(x): return __bswap_16 (x)
++
++def le16toh(x): return (x)
++
++def htobe32(x): return __bswap_32 (x)
++
++def htole32(x): return (x)
++
++def be32toh(x): return __bswap_32 (x)
++
++def le32toh(x): return (x)
++
++def htobe64(x): return __bswap_64 (x)
++
++def htole64(x): return (x)
++
++def be64toh(x): return __bswap_64 (x)
++
++def le64toh(x): return (x)
++
++def htobe16(x): return (x)
++
++def htole16(x): return __bswap_16 (x)
++
++def be16toh(x): return (x)
++
++def le16toh(x): return __bswap_16 (x)
++
++def htobe32(x): return (x)
++
++def htole32(x): return __bswap_32 (x)
++
++def be32toh(x): return (x)
++
++def le32toh(x): return __bswap_32 (x)
++
++def htobe64(x): return (x)
++
++def htole64(x): return __bswap_64 (x)
++
++def be64toh(x): return (x)
++
++def le64toh(x): return __bswap_64 (x)
++
++
++# Included from sys/select.h
++_SYS_SELECT_H = 1
++
++# Included from bits/select.h
++def __FD_ZERO(fdsp): return \
++
++def __FD_ZERO(set): return \
++
++
++# Included from bits/sigset.h
++_SIGSET_H_types = 1
++_SIGSET_H_fns = 1
++def __sigword(sig): return (((sig) - 1) >> 5)
++
++def __sigemptyset(set): return \
++
++def __sigfillset(set): return \
++
++def __sigisemptyset(set): return \
++
++def __FDELT(d): return ((d) / __NFDBITS)
++
++FD_SETSIZE = __FD_SETSIZE
++def FD_ZERO(fdsetp): return __FD_ZERO (fdsetp)
++
++
++# Included from sys/sysmacros.h
++_SYS_SYSMACROS_H = 1
++def minor(dev): return ((int)((dev) & (-65281)))
++
++def gnu_dev_major(dev): return major (dev)
++
++def gnu_dev_minor(dev): return minor (dev)
++
++
++# Included from bits/pthreadtypes.h
++_BITS_PTHREADTYPES_H = 1
++
++# Included from bits/sched.h
++SCHED_OTHER = 2
++SCHED_FIFO = 1
++SCHED_RR = 3
++CSIGNAL = 0x000000ff
++CLONE_VM = 0x00000100
++CLONE_FS = 0x00000200
++CLONE_FILES = 0x00000400
++CLONE_SIGHAND = 0x00000800
++CLONE_PTRACE = 0x00002000
++CLONE_VFORK = 0x00004000
++CLONE_SYSVSEM = 0x00040000
++__defined_schedparam = 1
++__CPU_SETSIZE = 128
++def __CPUELT(cpu): return ((cpu) / __NCPUBITS)
++
++def __CPU_ALLOC_SIZE(count): return \
++
++def __CPU_ALLOC(count): return __sched_cpualloc (count)
++
++def __CPU_FREE(cpuset): return __sched_cpufree (cpuset)
++
+--- /dev/null
++++ b/Lib/plat-gnukfreebsd7/DLFCN.py
+@@ -0,0 +1,118 @@
++# Generated by h2py from /usr/include/dlfcn.h
++_DLFCN_H = 1
++
++# Included from features.h
++_FEATURES_H = 1
++__USE_ANSI = 1
++__FAVOR_BSD = 1
++_ISOC99_SOURCE = 1
++_POSIX_SOURCE = 1
++_POSIX_C_SOURCE = 200809L
++_XOPEN_SOURCE = 700
++_XOPEN_SOURCE_EXTENDED = 1
++_LARGEFILE64_SOURCE = 1
++_BSD_SOURCE = 1
++_SVID_SOURCE = 1
++_ATFILE_SOURCE = 1
++_BSD_SOURCE = 1
++_SVID_SOURCE = 1
++__USE_ISOC99 = 1
++__USE_ISOC95 = 1
++_POSIX_SOURCE = 1
++_POSIX_C_SOURCE = 2
++_POSIX_C_SOURCE = 199506L
++_POSIX_C_SOURCE = 200112L
++_POSIX_C_SOURCE = 200809L
++__USE_POSIX_IMPLICITLY = 1
++__USE_POSIX = 1
++__USE_POSIX2 = 1
++__USE_POSIX199309 = 1
++__USE_POSIX199506 = 1
++__USE_XOPEN2K = 1
++__USE_ISOC99 = 1
++__USE_XOPEN2K8 = 1
++_ATFILE_SOURCE = 1
++__USE_XOPEN = 1
++__USE_XOPEN_EXTENDED = 1
++__USE_UNIX98 = 1
++_LARGEFILE_SOURCE = 1
++__USE_XOPEN2K8 = 1
++__USE_XOPEN2K = 1
++__USE_ISOC99 = 1
++__USE_XOPEN_EXTENDED = 1
++__USE_LARGEFILE = 1
++__USE_LARGEFILE64 = 1
++__USE_FILE_OFFSET64 = 1
++__USE_MISC = 1
++__USE_BSD = 1
++__USE_SVID = 1
++__USE_ATFILE = 1
++__USE_GNU = 1
++__USE_REENTRANT = 1
++__USE_FORTIFY_LEVEL = 2
++__USE_FORTIFY_LEVEL = 1
++__USE_FORTIFY_LEVEL = 0
++
++# Included from bits/predefs.h
++__STDC_IEC_559__ = 1
++__STDC_IEC_559_COMPLEX__ = 1
++__STDC_ISO_10646__ = 200009L
++__GNU_LIBRARY__ = 6
++__GLIBC__ = 2
++__GLIBC_MINOR__ = 11
++__GLIBC_HAVE_LONG_LONG = 1
++
++# Included from sys/cdefs.h
++_SYS_CDEFS_H = 1
++def __NTH(fct): return fct
++
++def __NTH(fct): return fct
++
++def __P(args): return args
++
++def __PMT(args): return args
++
++def __STRING(x): return #x
++
++def __bos(ptr): return __builtin_object_size (ptr, __USE_FORTIFY_LEVEL > 1)
++
++def __bos0(ptr): return __builtin_object_size (ptr, 0)
++
++def __warnattr(msg): return __attribute__((__warning__ (msg)))
++
++__flexarr = []
++__flexarr = [0]
++__flexarr = []
++__flexarr = [1]
++def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
++
++def __attribute__(xyz): return
++
++def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
++
++def __attribute_format_arg__(x): return
++
++
++# Included from bits/wordsize.h
++__WORDSIZE = 32
++__LDBL_COMPAT = 1
++def __LDBL_REDIR_DECL(name): return \
++
++__USE_LARGEFILE = 1
++__USE_LARGEFILE64 = 1
++__USE_EXTERN_INLINES = 1
++__USE_EXTERN_INLINES_IN_LIBC = 1
++
++# Included from gnu/stubs.h
++
++# Included from bits/dlfcn.h
++RTLD_LAZY = 0x00001
++RTLD_NOW = 0x00002
++RTLD_BINDING_MASK = 0x3
++RTLD_NOLOAD = 0x00004
++RTLD_DEEPBIND = 0x00008
++RTLD_GLOBAL = 0x00100
++RTLD_LOCAL = 0
++RTLD_NODELETE = 0x01000
++LM_ID_BASE = 0
++LM_ID_NEWLM = -1
+--- /dev/null
++++ b/Lib/plat-gnukfreebsd8/IN.py
+@@ -0,0 +1,809 @@
++# Generated by h2py from /usr/include/netinet/in.h
++_NETINET_IN_H = 1
++
++# Included from features.h
++_FEATURES_H = 1
++__USE_ANSI = 1
++__FAVOR_BSD = 1
++_ISOC99_SOURCE = 1
++_POSIX_SOURCE = 1
++_POSIX_C_SOURCE = 200809L
++_XOPEN_SOURCE = 700
++_XOPEN_SOURCE_EXTENDED = 1
++_LARGEFILE64_SOURCE = 1
++_BSD_SOURCE = 1
++_SVID_SOURCE = 1
++_ATFILE_SOURCE = 1
++_BSD_SOURCE = 1
++_SVID_SOURCE = 1
++__USE_ISOC99 = 1
++__USE_ISOC95 = 1
++_POSIX_SOURCE = 1
++_POSIX_C_SOURCE = 2
++_POSIX_C_SOURCE = 199506L
++_POSIX_C_SOURCE = 200112L
++_POSIX_C_SOURCE = 200809L
++__USE_POSIX_IMPLICITLY = 1
++__USE_POSIX = 1
++__USE_POSIX2 = 1
++__USE_POSIX199309 = 1
++__USE_POSIX199506 = 1
++__USE_XOPEN2K = 1
++__USE_ISOC99 = 1
++__USE_XOPEN2K8 = 1
++_ATFILE_SOURCE = 1
++__USE_XOPEN = 1
++__USE_XOPEN_EXTENDED = 1
++__USE_UNIX98 = 1
++_LARGEFILE_SOURCE = 1
++__USE_XOPEN2K8 = 1
++__USE_XOPEN2K = 1
++__USE_ISOC99 = 1
++__USE_XOPEN_EXTENDED = 1
++__USE_LARGEFILE = 1
++__USE_LARGEFILE64 = 1
++__USE_FILE_OFFSET64 = 1
++__USE_MISC = 1
++__USE_BSD = 1
++__USE_SVID = 1
++__USE_ATFILE = 1
++__USE_GNU = 1
++__USE_REENTRANT = 1
++__USE_FORTIFY_LEVEL = 2
++__USE_FORTIFY_LEVEL = 1
++__USE_FORTIFY_LEVEL = 0
++
++# Included from bits/predefs.h
++__STDC_IEC_559__ = 1
++__STDC_IEC_559_COMPLEX__ = 1
++__STDC_ISO_10646__ = 200009L
++__GNU_LIBRARY__ = 6
++__GLIBC__ = 2
++__GLIBC_MINOR__ = 11
++__GLIBC_HAVE_LONG_LONG = 1
++
++# Included from sys/cdefs.h
++_SYS_CDEFS_H = 1
++def __NTH(fct): return fct
++
++def __NTH(fct): return fct
++
++def __P(args): return args
++
++def __PMT(args): return args
++
++def __STRING(x): return #x
++
++def __bos(ptr): return __builtin_object_size (ptr, __USE_FORTIFY_LEVEL > 1)
++
++def __bos0(ptr): return __builtin_object_size (ptr, 0)
++
++def __warnattr(msg): return __attribute__((__warning__ (msg)))
++
++__flexarr = []
++__flexarr = [0]
++__flexarr = []
++__flexarr = [1]
++def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
++
++def __attribute__(xyz): return
++
++def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
++
++def __attribute_format_arg__(x): return
++
++
++# Included from bits/wordsize.h
++__WORDSIZE = 32
++__LDBL_COMPAT = 1
++def __LDBL_REDIR_DECL(name): return \
++
++__USE_LARGEFILE = 1
++__USE_LARGEFILE64 = 1
++__USE_EXTERN_INLINES = 1
++__USE_EXTERN_INLINES_IN_LIBC = 1
++
++# Included from gnu/stubs.h
++
++# Included from stdint.h
++_STDINT_H = 1
++
++# Included from bits/wchar.h
++_BITS_WCHAR_H = 1
++__WCHAR_MAX = (2147483647)
++__WCHAR_MIN = (-__WCHAR_MAX - 1)
++def __INT64_C(c): return c ## L
++
++def __UINT64_C(c): return c ## UL
++
++def __INT64_C(c): return c ## LL
++
++def __UINT64_C(c): return c ## ULL
++
++INT8_MIN = (-128)
++INT16_MIN = (-32767-1)
++INT32_MIN = (-2147483647-1)
++INT64_MIN = (-__INT64_C(9223372036854775807)-1)
++INT8_MAX = (127)
++INT16_MAX = (32767)
++INT32_MAX = (2147483647)
++INT64_MAX = (__INT64_C(9223372036854775807))
++UINT8_MAX = (255)
++UINT16_MAX = (65535)
++UINT64_MAX = (__UINT64_C(18446744073709551615))
++INT_LEAST8_MIN = (-128)
++INT_LEAST16_MIN = (-32767-1)
++INT_LEAST32_MIN = (-2147483647-1)
++INT_LEAST64_MIN = (-__INT64_C(9223372036854775807)-1)
++INT_LEAST8_MAX = (127)
++INT_LEAST16_MAX = (32767)
++INT_LEAST32_MAX = (2147483647)
++INT_LEAST64_MAX = (__INT64_C(9223372036854775807))
++UINT_LEAST8_MAX = (255)
++UINT_LEAST16_MAX = (65535)
++UINT_LEAST64_MAX = (__UINT64_C(18446744073709551615))
++INT_FAST8_MIN = (-128)
++INT_FAST16_MIN = (-9223372036854775807L-1)
++INT_FAST32_MIN = (-9223372036854775807L-1)
++INT_FAST16_MIN = (-2147483647-1)
++INT_FAST32_MIN = (-2147483647-1)
++INT_FAST64_MIN = (-__INT64_C(9223372036854775807)-1)
++INT_FAST8_MAX = (127)
++INT_FAST16_MAX = (9223372036854775807L)
++INT_FAST32_MAX = (9223372036854775807L)
++INT_FAST16_MAX = (2147483647)
++INT_FAST32_MAX = (2147483647)
++INT_FAST64_MAX = (__INT64_C(9223372036854775807))
++UINT_FAST8_MAX = (255)
++UINT_FAST64_MAX = (__UINT64_C(18446744073709551615))
++INTPTR_MIN = (-9223372036854775807L-1)
++INTPTR_MAX = (9223372036854775807L)
++INTPTR_MIN = (-2147483647-1)
++INTPTR_MAX = (2147483647)
++INTMAX_MIN = (-__INT64_C(9223372036854775807)-1)
++INTMAX_MAX = (__INT64_C(9223372036854775807))
++UINTMAX_MAX = (__UINT64_C(18446744073709551615))
++PTRDIFF_MIN = (-9223372036854775807L-1)
++PTRDIFF_MAX = (9223372036854775807L)
++PTRDIFF_MIN = (-2147483647-1)
++PTRDIFF_MAX = (2147483647)
++SIG_ATOMIC_MIN = (-2147483647-1)
++SIG_ATOMIC_MAX = (2147483647)
++WCHAR_MIN = __WCHAR_MIN
++WCHAR_MAX = __WCHAR_MAX
++def INT8_C(c): return c
++
++def INT16_C(c): return c
++
++def INT32_C(c): return c
++
++def INT64_C(c): return c ## L
++
++def INT64_C(c): return c ## LL
++
++def UINT8_C(c): return c
++
++def UINT16_C(c): return c
++
++def UINT32_C(c): return c ## U
++
++def UINT64_C(c): return c ## UL
++
++def UINT64_C(c): return c ## ULL
++
++def INTMAX_C(c): return c ## L
++
++def UINTMAX_C(c): return c ## UL
++
++def INTMAX_C(c): return c ## LL
++
++def UINTMAX_C(c): return c ## ULL
++
++
++# Included from sys/socket.h
++_SYS_SOCKET_H = 1
++
++# Included from sys/uio.h
++_SYS_UIO_H = 1
++from TYPES import *
++
++# Included from bits/uio.h
++_BITS_UIO_H = 1
++from TYPES import *
++UIO_MAXIOV = 1024
++
++# Included from bits/sigset.h
++_SIGSET_H_types = 1
++_SIGSET_H_fns = 1
++def __sigword(sig): return (((sig) - 1) >> 5)
++
++def __sigemptyset(set): return \
++
++def __sigfillset(set): return \
++
++def __sigisemptyset(set): return \
++
++
++# Included from bits/socket.h
++__BITS_SOCKET_H = 1
++
++# Included from limits.h
++_LIBC_LIMITS_H_ = 1
++MB_LEN_MAX = 16
++_LIMITS_H = 1
++CHAR_BIT = 8
++SCHAR_MIN = (-128)
++SCHAR_MAX = 127
++UCHAR_MAX = 255
++CHAR_MIN = 0
++CHAR_MAX = UCHAR_MAX
++CHAR_MIN = SCHAR_MIN
++CHAR_MAX = SCHAR_MAX
++SHRT_MIN = (-32768)
++SHRT_MAX = 32767
++USHRT_MAX = 65535
++INT_MAX = 2147483647
++LONG_MAX = 9223372036854775807L
++LONG_MAX = 2147483647L
++LONG_MIN = (-LONG_MAX - 1L)
++
++# Included from bits/posix1_lim.h
++_BITS_POSIX1_LIM_H = 1
++_POSIX_AIO_LISTIO_MAX = 2
++_POSIX_AIO_MAX = 1
++_POSIX_ARG_MAX = 4096
++_POSIX_CHILD_MAX = 25
++_POSIX_CHILD_MAX = 6
++_POSIX_DELAYTIMER_MAX = 32
++_POSIX_HOST_NAME_MAX = 255
++_POSIX_LINK_MAX = 8
++_POSIX_LOGIN_NAME_MAX = 9
++_POSIX_MAX_CANON = 255
++_POSIX_MAX_INPUT = 255
++_POSIX_MQ_OPEN_MAX = 8
++_POSIX_MQ_PRIO_MAX = 32
++_POSIX_NAME_MAX = 14
++_POSIX_NGROUPS_MAX = 8
++_POSIX_NGROUPS_MAX = 0
++_POSIX_OPEN_MAX = 20
++_POSIX_OPEN_MAX = 16
++_POSIX_FD_SETSIZE = _POSIX_OPEN_MAX
++_POSIX_PATH_MAX = 256
++_POSIX_PIPE_BUF = 512
++_POSIX_RE_DUP_MAX = 255
++_POSIX_RTSIG_MAX = 8
++_POSIX_SEM_NSEMS_MAX = 256
++_POSIX_SEM_VALUE_MAX = 32767
++_POSIX_SIGQUEUE_MAX = 32
++_POSIX_SSIZE_MAX = 32767
++_POSIX_STREAM_MAX = 8
++_POSIX_SYMLINK_MAX = 255
++_POSIX_SYMLOOP_MAX = 8
++_POSIX_TIMER_MAX = 32
++_POSIX_TTY_NAME_MAX = 9
++_POSIX_TZNAME_MAX = 6
++_POSIX_QLIMIT = 1
++_POSIX_HIWAT = _POSIX_PIPE_BUF
++_POSIX_UIO_MAXIOV = 16
++_POSIX_CLOCKRES_MIN = 20000000
++
++# Included from bits/local_lim.h
++
++# Included from sys/syslimits.h
++ARG_MAX = 262144
++CHILD_MAX = 40
++LINK_MAX = 32767
++MAX_CANON = 255
++MAX_INPUT = 255
++NAME_MAX = 255
++NGROUPS_MAX = 1023
++OPEN_MAX = 64
++PATH_MAX = 1024
++PIPE_BUF = 512
++IOV_MAX = 1024
++_POSIX_THREAD_KEYS_MAX = 128
++PTHREAD_KEYS_MAX = 1024
++_POSIX_THREAD_DESTRUCTOR_ITERATIONS = 4
++PTHREAD_DESTRUCTOR_ITERATIONS = _POSIX_THREAD_DESTRUCTOR_ITERATIONS
++_POSIX_THREAD_THREADS_MAX = 64
++PTHREAD_THREADS_MAX = 1024
++AIO_PRIO_DELTA_MAX = 20
++PTHREAD_STACK_MIN = 16384
++TIMER_MAX = 256
++DELAYTIMER_MAX = 2147483647
++SSIZE_MAX = LONG_MAX
++NGROUPS_MAX = 8
++
++# Included from bits/posix2_lim.h
++_BITS_POSIX2_LIM_H = 1
++_POSIX2_BC_BASE_MAX = 99
++_POSIX2_BC_DIM_MAX = 2048
++_POSIX2_BC_SCALE_MAX = 99
++_POSIX2_BC_STRING_MAX = 1000
++_POSIX2_COLL_WEIGHTS_MAX = 2
++_POSIX2_EXPR_NEST_MAX = 32
++_POSIX2_LINE_MAX = 2048
++_POSIX2_RE_DUP_MAX = 255
++_POSIX2_CHARCLASS_NAME_MAX = 14
++BC_BASE_MAX = _POSIX2_BC_BASE_MAX
++BC_DIM_MAX = _POSIX2_BC_DIM_MAX
++BC_SCALE_MAX = _POSIX2_BC_SCALE_MAX
++BC_STRING_MAX = _POSIX2_BC_STRING_MAX
++COLL_WEIGHTS_MAX = 255
++EXPR_NEST_MAX = _POSIX2_EXPR_NEST_MAX
++LINE_MAX = _POSIX2_LINE_MAX
++CHARCLASS_NAME_MAX = 2048
++RE_DUP_MAX = (0x7fff)
++
++# Included from bits/xopen_lim.h
++_XOPEN_LIM_H = 1
++
++# Included from bits/stdio_lim.h
++L_tmpnam = 20
++TMP_MAX = 238328
++FILENAME_MAX = 1024
++L_ctermid = 9
++L_cuserid = 9
++FOPEN_MAX = 64
++IOV_MAX = 1024
++_XOPEN_IOV_MAX = _POSIX_UIO_MAXIOV
++NL_ARGMAX = _POSIX_ARG_MAX
++NL_LANGMAX = _POSIX2_LINE_MAX
++NL_MSGMAX = INT_MAX
++NL_NMAX = INT_MAX
++NL_SETMAX = INT_MAX
++NL_TEXTMAX = INT_MAX
++NZERO = 20
++WORD_BIT = 16
++WORD_BIT = 32
++WORD_BIT = 64
++WORD_BIT = 16
++WORD_BIT = 32
++WORD_BIT = 64
++WORD_BIT = 32
++LONG_BIT = 32
++LONG_BIT = 64
++LONG_BIT = 32
++LONG_BIT = 64
++LONG_BIT = 64
++LONG_BIT = 32
++
++# Included from bits/types.h
++_BITS_TYPES_H = 1
++__S32_TYPE = int
++__SWORD_TYPE = int
++__SLONG32_TYPE = int
++
++# Included from bits/typesizes.h
++_BITS_TYPESIZES_H = 1
++__PID_T_TYPE = __S32_TYPE
++__CLOCK_T_TYPE = __S32_TYPE
++__SWBLK_T_TYPE = __S32_TYPE
++__CLOCKID_T_TYPE = __S32_TYPE
++__TIMER_T_TYPE = __S32_TYPE
++__SSIZE_T_TYPE = __SWORD_TYPE
++__FD_SETSIZE = 1024
++PF_UNSPEC = 0
++PF_LOCAL = 1
++PF_UNIX = PF_LOCAL
++PF_FILE = PF_LOCAL
++PF_INET = 2
++PF_IMPLINK = 3
++PF_PUP = 4
++PF_CHAOS = 5
++PF_NS = 6
++PF_ISO = 7
++PF_OSI = PF_ISO
++PF_ECMA = 8
++PF_DATAKIT = 9
++PF_CCITT = 10
++PF_SNA = 11
++PF_DECnet = 12
++PF_DLI = 13
++PF_LAT = 14
++PF_HYLINK = 15
++PF_APPLETALK = 16
++PF_ROUTE = 17
++PF_LINK = 18
++PF_XTP = 19
++PF_COIP = 20
++PF_CNT = 21
++PF_RTIP = 22
++PF_IPX = 23
++PF_SIP = 24
++PF_PIP = 25
++PF_ISDN = 26
++PF_KEY = 27
++PF_INET6 = 28
++PF_NATM = 29
++PF_ATM = 30
++PF_HDRCMPLT = 31
++PF_NETGRAPH = 32
++PF_MAX = 33
++AF_UNSPEC = PF_UNSPEC
++AF_LOCAL = PF_LOCAL
++AF_UNIX = PF_UNIX
++AF_FILE = PF_FILE
++AF_INET = PF_INET
++AF_IMPLINK = PF_IMPLINK
++AF_PUP = PF_PUP
++AF_CHAOS = PF_CHAOS
++AF_NS = PF_NS
++AF_ISO = PF_ISO
++AF_OSI = PF_OSI
++AF_ECMA = PF_ECMA
++AF_DATAKIT = PF_DATAKIT
++AF_CCITT = PF_CCITT
++AF_SNA = PF_SNA
++AF_DECnet = PF_DECnet
++AF_DLI = PF_DLI
++AF_LAT = PF_LAT
++AF_HYLINK = PF_HYLINK
++AF_APPLETALK = PF_APPLETALK
++AF_ROUTE = PF_ROUTE
++AF_LINK = PF_LINK
++pseudo_AF_XTP = PF_XTP
++AF_COIP = PF_COIP
++AF_CNT = PF_CNT
++pseudo_AF_RTIP = PF_RTIP
++AF_IPX = PF_IPX
++AF_SIP = PF_SIP
++pseudo_AF_PIP = PF_PIP
++AF_ISDN = PF_ISDN
++AF_E164 = AF_ISDN
++pseudo_AF_KEY = PF_KEY
++AF_INET6 = PF_INET6
++AF_NATM = PF_NATM
++AF_ATM = PF_ATM
++pseudo_AF_HDRCMPLT = PF_HDRCMPLT
++AF_NETGRAPH = PF_NETGRAPH
++AF_MAX = PF_MAX
++SOMAXCONN = 128
++
++# Included from bits/sockaddr.h
++_BITS_SOCKADDR_H = 1
++def __SOCKADDR_COMMON(sa_prefix): return \
++
++_HAVE_SA_LEN = 1
++_SS_SIZE = 128
++def CMSG_FIRSTHDR(mhdr): return \
++
++CMGROUP_MAX = 16
++SOL_SOCKET = 0xffff
++LOCAL_PEERCRED = 0x001
++LOCAL_CREDS = 0x002
++LOCAL_CONNWAIT = 0x004
++
++# Included from bits/socket2.h
++def IN_CLASSA(a): return ((((in_addr_t)(a)) & (-2147483648)) == 0)
++
++IN_CLASSA_NET = (-16777216)
++IN_CLASSA_NSHIFT = 24
++IN_CLASSA_HOST = ((-1) & ~IN_CLASSA_NET)
++IN_CLASSA_MAX = 128
++def IN_CLASSB(a): return ((((in_addr_t)(a)) & (-1073741824)) == (-2147483648))
++
++IN_CLASSB_NET = (-65536)
++IN_CLASSB_NSHIFT = 16
++IN_CLASSB_HOST = ((-1) & ~IN_CLASSB_NET)
++IN_CLASSB_MAX = 65536
++def IN_CLASSC(a): return ((((in_addr_t)(a)) & (-536870912)) == (-1073741824))
++
++IN_CLASSC_NET = (-256)
++IN_CLASSC_NSHIFT = 8
++IN_CLASSC_HOST = ((-1) & ~IN_CLASSC_NET)
++def IN_CLASSD(a): return ((((in_addr_t)(a)) & (-268435456)) == (-536870912))
++
++def IN_MULTICAST(a): return IN_CLASSD(a)
++
++def IN_EXPERIMENTAL(a): return ((((in_addr_t)(a)) & (-536870912)) == (-536870912))
++
++def IN_BADCLASS(a): return ((((in_addr_t)(a)) & (-268435456)) == (-268435456))
++
++IN_LOOPBACKNET = 127
++INET_ADDRSTRLEN = 16
++INET6_ADDRSTRLEN = 46
++
++# Included from bits/in.h
++IMPLINK_IP = 155
++IMPLINK_LOWEXPER = 156
++IMPLINK_HIGHEXPER = 158
++IPPROTO_DIVERT = 258
++SOL_IP = 0
++IP_OPTIONS = 1
++IP_HDRINCL = 2
++IP_TOS = 3
++IP_TTL = 4
++IP_RECVOPTS = 5
++IP_RECVRETOPTS = 6
++IP_RECVDSTADDR = 7
++IP_SENDSRCADDR = IP_RECVDSTADDR
++IP_RETOPTS = 8
++IP_MULTICAST_IF = 9
++IP_MULTICAST_TTL = 10
++IP_MULTICAST_LOOP = 11
++IP_ADD_MEMBERSHIP = 12
++IP_DROP_MEMBERSHIP = 13
++IP_MULTICAST_VIF = 14
++IP_RSVP_ON = 15
++IP_RSVP_OFF = 16
++IP_RSVP_VIF_ON = 17
++IP_RSVP_VIF_OFF = 18
++IP_PORTRANGE = 19
++IP_RECVIF = 20
++IP_IPSEC_POLICY = 21
++IP_FAITH = 22
++IP_ONESBCAST = 23
++IP_NONLOCALOK = 24
++IP_FW_TABLE_ADD = 40
++IP_FW_TABLE_DEL = 41
++IP_FW_TABLE_FLUSH = 42
++IP_FW_TABLE_GETSIZE = 43
++IP_FW_TABLE_LIST = 44
++IP_FW_ADD = 50
++IP_FW_DEL = 51
++IP_FW_FLUSH = 52
++IP_FW_ZERO = 53
++IP_FW_GET = 54
++IP_FW_RESETLOG = 55
++IP_FW_NAT_CFG = 56
++IP_FW_NAT_DEL = 57
++IP_FW_NAT_GET_CONFIG = 58
++IP_FW_NAT_GET_LOG = 59
++IP_DUMMYNET_CONFIGURE = 60
++IP_DUMMYNET_DEL = 61
++IP_DUMMYNET_FLUSH = 62
++IP_DUMMYNET_GET = 64
++IP_RECVTTL = 65
++IP_MINTTL = 66
++IP_DONTFRAG = 67
++IP_ADD_SOURCE_MEMBERSHIP = 70
++IP_DROP_SOURCE_MEMBERSHIP = 71
++IP_BLOCK_SOURCE = 72
++IP_UNBLOCK_SOURCE = 73
++IP_MSFILTER = 74
++MCAST_JOIN_GROUP = 80
++MCAST_LEAVE_GROUP = 81
++MCAST_JOIN_SOURCE_GROUP = 82
++MCAST_LEAVE_SOURCE_GROUP = 83
++MCAST_BLOCK_SOURCE = 84
++MCAST_UNBLOCK_SOURCE = 85
++IP_DEFAULT_MULTICAST_TTL = 1
++IP_DEFAULT_MULTICAST_LOOP = 1
++IP_MIN_MEMBERSHIPS = 31
++IP_MAX_MEMBERSHIPS = 4095
++IP_MAX_SOURCE_FILTER = 1024
++MCAST_UNDEFINED = 0
++MCAST_INCLUDE = 1
++MCAST_EXCLUDE = 2
++IP_PORTRANGE_DEFAULT = 0
++IP_PORTRANGE_HIGH = 1
++IP_PORTRANGE_LOW = 2
++IPCTL_FORWARDING = 1
++IPCTL_SENDREDIRECTS = 2
++IPCTL_DEFTTL = 3
++IPCTL_DEFMTU = 4
++IPCTL_RTEXPIRE = 5
++IPCTL_RTMINEXPIRE = 6
++IPCTL_RTMAXCACHE = 7
++IPCTL_SOURCEROUTE = 8
++IPCTL_DIRECTEDBROADCAST = 9
++IPCTL_INTRQMAXLEN = 10
++IPCTL_INTRQDROPS = 11
++IPCTL_STATS = 12
++IPCTL_ACCEPTSOURCEROUTE = 13
++IPCTL_FASTFORWARDING = 14
++IPCTL_KEEPFAITH = 15
++IPCTL_GIF_TTL = 16
++IPCTL_MAXID = 17
++IPV6_SOCKOPT_RESERVED1 = 3
++IPV6_UNICAST_HOPS = 4
++IPV6_MULTICAST_IF = 9
++IPV6_MULTICAST_HOPS = 10
++IPV6_MULTICAST_LOOP = 11
++IPV6_JOIN_GROUP = 12
++IPV6_LEAVE_GROUP = 13
++IPV6_PORTRANGE = 14
++ICMP6_FILTER = 18
++IPV6_CHECKSUM = 26
++IPV6_V6ONLY = 27
++IPV6_IPSEC_POLICY = 28
++IPV6_FAITH = 29
++IPV6_FW_ADD = 30
++IPV6_FW_DEL = 31
++IPV6_FW_FLUSH = 32
++IPV6_FW_ZERO = 33
++IPV6_FW_GET = 34
++IPV6_RTHDRDSTOPTS = 35
++IPV6_RECVPKTINFO = 36
++IPV6_RECVHOPLIMIT = 37
++IPV6_RECVRTHDR = 38
++IPV6_RECVHOPOPTS = 39
++IPV6_RECVDSTOPTS = 40
++IPV6_USE_MIN_MTU = 42
++IPV6_RECVPATHMTU = 43
++IPV6_PATHMTU = 44
++IPV6_PKTINFO = 46
++IPV6_HOPLIMIT = 47
++IPV6_NEXTHOP = 48
++IPV6_HOPOPTS = 49
++IPV6_DSTOPTS = 50
++IPV6_RTHDR = 51
++IPV6_RECVTCLASS = 57
++IPV6_AUTOFLOWLABEL = 59
++IPV6_TCLASS = 61
++IPV6_DONTFRAG = 62
++IPV6_PREFER_TEMPADDR = 63
++IPV6_ADD_MEMBERSHIP = IPV6_JOIN_GROUP
++IPV6_DROP_MEMBERSHIP = IPV6_LEAVE_GROUP
++IPV6_RXHOPOPTS = IPV6_HOPOPTS
++IPV6_RXDSTOPTS = IPV6_DSTOPTS
++SOL_IPV6 = 41
++SOL_ICMPV6 = 58
++IPV6_DEFAULT_MULTICAST_HOPS = 1
++IPV6_DEFAULT_MULTICAST_LOOP = 1
++IPV6_PORTRANGE_DEFAULT = 0
++IPV6_PORTRANGE_HIGH = 1
++IPV6_PORTRANGE_LOW = 2
++IPV6_RTHDR_LOOSE = 0
++IPV6_RTHDR_STRICT = 1
++IPV6_RTHDR_TYPE_0 = 0
++IPV6CTL_FORWARDING = 1
++IPV6CTL_SENDREDIRECTS = 2
++IPV6CTL_DEFHLIM = 3
++IPV6CTL_FORWSRCRT = 5
++IPV6CTL_STATS = 6
++IPV6CTL_MRTSTATS = 7
++IPV6CTL_MRTPROTO = 8
++IPV6CTL_MAXFRAGPACKETS = 9
++IPV6CTL_SOURCECHECK = 10
++IPV6CTL_SOURCECHECK_LOGINT = 11
++IPV6CTL_ACCEPT_RTADV = 12
++IPV6CTL_KEEPFAITH = 13
++IPV6CTL_LOG_INTERVAL = 14
++IPV6CTL_HDRNESTLIMIT = 15
++IPV6CTL_DAD_COUNT = 16
++IPV6CTL_AUTO_FLOWLABEL = 17
++IPV6CTL_DEFMCASTHLIM = 18
++IPV6CTL_GIF_HLIM = 19
++IPV6CTL_KAME_VERSION = 20
++IPV6CTL_USE_DEPRECATED = 21
++IPV6CTL_RR_PRUNE = 22
++IPV6CTL_V6ONLY = 24
++IPV6CTL_RTEXPIRE = 25
++IPV6CTL_RTMINEXPIRE = 26
++IPV6CTL_RTMAXCACHE = 27
++IPV6CTL_USETEMPADDR = 32
++IPV6CTL_TEMPPLTIME = 33
++IPV6CTL_TEMPVLTIME = 34
++IPV6CTL_AUTO_LINKLOCAL = 35
++IPV6CTL_RIP6STATS = 36
++IPV6CTL_PREFER_TEMPADDR = 37
++IPV6CTL_ADDRCTLPOLICY = 38
++IPV6CTL_USE_DEFAULTZONE = 39
++IPV6CTL_MAXFRAGS = 41
++IPV6CTL_MCAST_PMTU = 44
++IPV6CTL_STEALTH = 45
++ICMPV6CTL_ND6_ONLINKNSRFC4861 = 47
++IPV6CTL_MAXID = 48
++
++# Included from endian.h
++_ENDIAN_H = 1
++__LITTLE_ENDIAN = 1234
++__BIG_ENDIAN = 4321
++__PDP_ENDIAN = 3412
++
++# Included from bits/endian.h
++__BYTE_ORDER = __LITTLE_ENDIAN
++__FLOAT_WORD_ORDER = __BYTE_ORDER
++LITTLE_ENDIAN = __LITTLE_ENDIAN
++BIG_ENDIAN = __BIG_ENDIAN
++PDP_ENDIAN = __PDP_ENDIAN
++BYTE_ORDER = __BYTE_ORDER
++
++# Included from bits/byteswap.h
++_BITS_BYTESWAP_H = 1
++def __bswap_constant_16(x): return \
++
++def __bswap_16(x): return \
++
++def __bswap_16(x): return \
++
++def __bswap_constant_32(x): return \
++
++def __bswap_32(x): return \
++
++def __bswap_32(x): return \
++
++def __bswap_32(x): return \
++
++def __bswap_constant_64(x): return \
++
++def __bswap_64(x): return \
++
++def htobe16(x): return __bswap_16 (x)
++
++def htole16(x): return (x)
++
++def be16toh(x): return __bswap_16 (x)
++
++def le16toh(x): return (x)
++
++def htobe32(x): return __bswap_32 (x)
++
++def htole32(x): return (x)
++
++def be32toh(x): return __bswap_32 (x)
++
++def le32toh(x): return (x)
++
++def htobe64(x): return __bswap_64 (x)
++
++def htole64(x): return (x)
++
++def be64toh(x): return __bswap_64 (x)
++
++def le64toh(x): return (x)
++
++def htobe16(x): return (x)
++
++def htole16(x): return __bswap_16 (x)
++
++def be16toh(x): return (x)
++
++def le16toh(x): return __bswap_16 (x)
++
++def htobe32(x): return (x)
++
++def htole32(x): return __bswap_32 (x)
++
++def be32toh(x): return (x)
++
++def le32toh(x): return __bswap_32 (x)
++
++def htobe64(x): return (x)
++
++def htole64(x): return __bswap_64 (x)
++
++def be64toh(x): return (x)
++
++def le64toh(x): return __bswap_64 (x)
++
++def ntohl(x): return (x)
++
++def ntohs(x): return (x)
++
++def htonl(x): return (x)
++
++def htons(x): return (x)
++
++def ntohl(x): return __bswap_32 (x)
++
++def ntohs(x): return __bswap_16 (x)
++
++def htonl(x): return __bswap_32 (x)
++
++def htons(x): return __bswap_16 (x)
++
++def IN6_IS_ADDR_UNSPECIFIED(a): return \
++
++def IN6_IS_ADDR_LOOPBACK(a): return \
++
++def IN6_IS_ADDR_LINKLOCAL(a): return \
++
++def IN6_IS_ADDR_SITELOCAL(a): return \
++
++def IN6_IS_ADDR_V4MAPPED(a): return \
++
++def IN6_IS_ADDR_V4COMPAT(a): return \
++
++def IN6_IS_ADDR_MC_NODELOCAL(a): return \
++
++def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
++
++def IN6_IS_ADDR_MC_SITELOCAL(a): return \
++
++def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
++
++def IN6_IS_ADDR_MC_GLOBAL(a): return \
++
+--- /dev/null
++++ b/Lib/plat-gnukfreebsd8/TYPES.py
+@@ -0,0 +1,303 @@
++# Generated by h2py from /usr/include/sys/types.h
++_SYS_TYPES_H = 1
++
++# Included from features.h
++_FEATURES_H = 1
++__USE_ANSI = 1
++__FAVOR_BSD = 1
++_ISOC99_SOURCE = 1
++_POSIX_SOURCE = 1
++_POSIX_C_SOURCE = 200809L
++_XOPEN_SOURCE = 700
++_XOPEN_SOURCE_EXTENDED = 1
++_LARGEFILE64_SOURCE = 1
++_BSD_SOURCE = 1
++_SVID_SOURCE = 1
++_ATFILE_SOURCE = 1
++_BSD_SOURCE = 1
++_SVID_SOURCE = 1
++__USE_ISOC99 = 1
++__USE_ISOC95 = 1
++_POSIX_SOURCE = 1
++_POSIX_C_SOURCE = 2
++_POSIX_C_SOURCE = 199506L
++_POSIX_C_SOURCE = 200112L
++_POSIX_C_SOURCE = 200809L
++__USE_POSIX_IMPLICITLY = 1
++__USE_POSIX = 1
++__USE_POSIX2 = 1
++__USE_POSIX199309 = 1
++__USE_POSIX199506 = 1
++__USE_XOPEN2K = 1
++__USE_ISOC99 = 1
++__USE_XOPEN2K8 = 1
++_ATFILE_SOURCE = 1
++__USE_XOPEN = 1
++__USE_XOPEN_EXTENDED = 1
++__USE_UNIX98 = 1
++_LARGEFILE_SOURCE = 1
++__USE_XOPEN2K8 = 1
++__USE_XOPEN2K = 1
++__USE_ISOC99 = 1
++__USE_XOPEN_EXTENDED = 1
++__USE_LARGEFILE = 1
++__USE_LARGEFILE64 = 1
++__USE_FILE_OFFSET64 = 1
++__USE_MISC = 1
++__USE_BSD = 1
++__USE_SVID = 1
++__USE_ATFILE = 1
++__USE_GNU = 1
++__USE_REENTRANT = 1
++__USE_FORTIFY_LEVEL = 2
++__USE_FORTIFY_LEVEL = 1
++__USE_FORTIFY_LEVEL = 0
++
++# Included from bits/predefs.h
++__STDC_IEC_559__ = 1
++__STDC_IEC_559_COMPLEX__ = 1
++__STDC_ISO_10646__ = 200009L
++__GNU_LIBRARY__ = 6
++__GLIBC__ = 2
++__GLIBC_MINOR__ = 11
++__GLIBC_HAVE_LONG_LONG = 1
++
++# Included from sys/cdefs.h
++_SYS_CDEFS_H = 1
++def __NTH(fct): return fct
++
++def __NTH(fct): return fct
++
++def __P(args): return args
++
++def __PMT(args): return args
++
++def __STRING(x): return #x
++
++def __bos(ptr): return __builtin_object_size (ptr, __USE_FORTIFY_LEVEL > 1)
++
++def __bos0(ptr): return __builtin_object_size (ptr, 0)
++
++def __warnattr(msg): return __attribute__((__warning__ (msg)))
++
++__flexarr = []
++__flexarr = [0]
++__flexarr = []
++__flexarr = [1]
++def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
++
++def __attribute__(xyz): return
++
++def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
++
++def __attribute_format_arg__(x): return
++
++
++# Included from bits/wordsize.h
++__WORDSIZE = 32
++__LDBL_COMPAT = 1
++def __LDBL_REDIR_DECL(name): return \
++
++__USE_LARGEFILE = 1
++__USE_LARGEFILE64 = 1
++__USE_EXTERN_INLINES = 1
++__USE_EXTERN_INLINES_IN_LIBC = 1
++
++# Included from gnu/stubs.h
++
++# Included from bits/types.h
++_BITS_TYPES_H = 1
++__S32_TYPE = int
++__SWORD_TYPE = int
++__SLONG32_TYPE = int
++
++# Included from bits/typesizes.h
++_BITS_TYPESIZES_H = 1
++__PID_T_TYPE = __S32_TYPE
++__CLOCK_T_TYPE = __S32_TYPE
++__SWBLK_T_TYPE = __S32_TYPE
++__CLOCKID_T_TYPE = __S32_TYPE
++__TIMER_T_TYPE = __S32_TYPE
++__SSIZE_T_TYPE = __SWORD_TYPE
++__FD_SETSIZE = 1024
++
++# Included from time.h
++_TIME_H = 1
++
++# Included from bits/time.h
++_BITS_TIME_H = 1
++CLOCKS_PER_SEC = 1000000l
++CLK_TCK = 128
++CLOCK_REALTIME = 0
++CLOCK_PROCESS_CPUTIME_ID = 2
++CLOCK_THREAD_CPUTIME_ID = 3
++CLOCK_MONOTONIC = 4
++CLOCK_VIRTUAL = 1
++CLOCK_PROF = 2
++CLOCK_UPTIME = 5
++CLOCK_UPTIME_PRECISE = 7
++CLOCK_UPTIME_FAST = 8
++CLOCK_REALTIME_PRECISE = 9
++CLOCK_REALTIME_FAST = 10
++CLOCK_MONOTONIC_PRECISE = 11
++CLOCK_MONOTONIC_FAST = 12
++CLOCK_SECOND = 13
++TIMER_RELTIME = 0
++TIMER_ABSTIME = 1
++_STRUCT_TIMEVAL = 1
++CLK_TCK = CLOCKS_PER_SEC
++__clock_t_defined = 1
++__time_t_defined = 1
++__clockid_t_defined = 1
++__timer_t_defined = 1
++__timespec_defined = 1
++
++# Included from xlocale.h
++_XLOCALE_H = 1
++def __isleap(year): return \
++
++__BIT_TYPES_DEFINED__ = 1
++
++# Included from endian.h
++_ENDIAN_H = 1
++__LITTLE_ENDIAN = 1234
++__BIG_ENDIAN = 4321
++__PDP_ENDIAN = 3412
++
++# Included from bits/endian.h
++__BYTE_ORDER = __LITTLE_ENDIAN
++__FLOAT_WORD_ORDER = __BYTE_ORDER
++LITTLE_ENDIAN = __LITTLE_ENDIAN
++BIG_ENDIAN = __BIG_ENDIAN
++PDP_ENDIAN = __PDP_ENDIAN
++BYTE_ORDER = __BYTE_ORDER
++
++# Included from bits/byteswap.h
++_BITS_BYTESWAP_H = 1
++def __bswap_constant_16(x): return \
++
++def __bswap_16(x): return \
++
++def __bswap_16(x): return \
++
++def __bswap_constant_32(x): return \
++
++def __bswap_32(x): return \
++
++def __bswap_32(x): return \
++
++def __bswap_32(x): return \
++
++def __bswap_constant_64(x): return \
++
++def __bswap_64(x): return \
++
++def htobe16(x): return __bswap_16 (x)
++
++def htole16(x): return (x)
++
++def be16toh(x): return __bswap_16 (x)
++
++def le16toh(x): return (x)
++
++def htobe32(x): return __bswap_32 (x)
++
++def htole32(x): return (x)
++
++def be32toh(x): return __bswap_32 (x)
++
++def le32toh(x): return (x)
++
++def htobe64(x): return __bswap_64 (x)
++
++def htole64(x): return (x)
++
++def be64toh(x): return __bswap_64 (x)
++
++def le64toh(x): return (x)
++
++def htobe16(x): return (x)
++
++def htole16(x): return __bswap_16 (x)
++
++def be16toh(x): return (x)
++
++def le16toh(x): return __bswap_16 (x)
++
++def htobe32(x): return (x)
++
++def htole32(x): return __bswap_32 (x)
++
++def be32toh(x): return (x)
++
++def le32toh(x): return __bswap_32 (x)
++
++def htobe64(x): return (x)
++
++def htole64(x): return __bswap_64 (x)
++
++def be64toh(x): return (x)
++
++def le64toh(x): return __bswap_64 (x)
++
++
++# Included from sys/select.h
++_SYS_SELECT_H = 1
++
++# Included from bits/select.h
++def __FD_ZERO(fdsp): return \
++
++def __FD_ZERO(set): return \
++
++
++# Included from bits/sigset.h
++_SIGSET_H_types = 1
++_SIGSET_H_fns = 1
++def __sigword(sig): return (((sig) - 1) >> 5)
++
++def __sigemptyset(set): return \
++
++def __sigfillset(set): return \
++
++def __sigisemptyset(set): return \
++
++def __FDELT(d): return ((d) / __NFDBITS)
++
++FD_SETSIZE = __FD_SETSIZE
++def FD_ZERO(fdsetp): return __FD_ZERO (fdsetp)
++
++
++# Included from sys/sysmacros.h
++_SYS_SYSMACROS_H = 1
++def minor(dev): return ((int)((dev) & (-65281)))
++
++def gnu_dev_major(dev): return major (dev)
++
++def gnu_dev_minor(dev): return minor (dev)
++
++
++# Included from bits/pthreadtypes.h
++_BITS_PTHREADTYPES_H = 1
++
++# Included from bits/sched.h
++SCHED_OTHER = 2
++SCHED_FIFO = 1
++SCHED_RR = 3
++CSIGNAL = 0x000000ff
++CLONE_VM = 0x00000100
++CLONE_FS = 0x00000200
++CLONE_FILES = 0x00000400
++CLONE_SIGHAND = 0x00000800
++CLONE_PTRACE = 0x00002000
++CLONE_VFORK = 0x00004000
++CLONE_SYSVSEM = 0x00040000
++__defined_schedparam = 1
++__CPU_SETSIZE = 128
++def __CPUELT(cpu): return ((cpu) / __NCPUBITS)
++
++def __CPU_ALLOC_SIZE(count): return \
++
++def __CPU_ALLOC(count): return __sched_cpualloc (count)
++
++def __CPU_FREE(cpuset): return __sched_cpufree (cpuset)
++
+--- /dev/null
++++ b/Lib/plat-gnukfreebsd8/DLFCN.py
+@@ -0,0 +1,118 @@
++# Generated by h2py from /usr/include/dlfcn.h
++_DLFCN_H = 1
++
++# Included from features.h
++_FEATURES_H = 1
++__USE_ANSI = 1
++__FAVOR_BSD = 1
++_ISOC99_SOURCE = 1
++_POSIX_SOURCE = 1
++_POSIX_C_SOURCE = 200809L
++_XOPEN_SOURCE = 700
++_XOPEN_SOURCE_EXTENDED = 1
++_LARGEFILE64_SOURCE = 1
++_BSD_SOURCE = 1
++_SVID_SOURCE = 1
++_ATFILE_SOURCE = 1
++_BSD_SOURCE = 1
++_SVID_SOURCE = 1
++__USE_ISOC99 = 1
++__USE_ISOC95 = 1
++_POSIX_SOURCE = 1
++_POSIX_C_SOURCE = 2
++_POSIX_C_SOURCE = 199506L
++_POSIX_C_SOURCE = 200112L
++_POSIX_C_SOURCE = 200809L
++__USE_POSIX_IMPLICITLY = 1
++__USE_POSIX = 1
++__USE_POSIX2 = 1
++__USE_POSIX199309 = 1
++__USE_POSIX199506 = 1
++__USE_XOPEN2K = 1
++__USE_ISOC99 = 1
++__USE_XOPEN2K8 = 1
++_ATFILE_SOURCE = 1
++__USE_XOPEN = 1
++__USE_XOPEN_EXTENDED = 1
++__USE_UNIX98 = 1
++_LARGEFILE_SOURCE = 1
++__USE_XOPEN2K8 = 1
++__USE_XOPEN2K = 1
++__USE_ISOC99 = 1
++__USE_XOPEN_EXTENDED = 1
++__USE_LARGEFILE = 1
++__USE_LARGEFILE64 = 1
++__USE_FILE_OFFSET64 = 1
++__USE_MISC = 1
++__USE_BSD = 1
++__USE_SVID = 1
++__USE_ATFILE = 1
++__USE_GNU = 1
++__USE_REENTRANT = 1
++__USE_FORTIFY_LEVEL = 2
++__USE_FORTIFY_LEVEL = 1
++__USE_FORTIFY_LEVEL = 0
++
++# Included from bits/predefs.h
++__STDC_IEC_559__ = 1
++__STDC_IEC_559_COMPLEX__ = 1
++__STDC_ISO_10646__ = 200009L
++__GNU_LIBRARY__ = 6
++__GLIBC__ = 2
++__GLIBC_MINOR__ = 11
++__GLIBC_HAVE_LONG_LONG = 1
++
++# Included from sys/cdefs.h
++_SYS_CDEFS_H = 1
++def __NTH(fct): return fct
++
++def __NTH(fct): return fct
++
++def __P(args): return args
++
++def __PMT(args): return args
++
++def __STRING(x): return #x
++
++def __bos(ptr): return __builtin_object_size (ptr, __USE_FORTIFY_LEVEL > 1)
++
++def __bos0(ptr): return __builtin_object_size (ptr, 0)
++
++def __warnattr(msg): return __attribute__((__warning__ (msg)))
++
++__flexarr = []
++__flexarr = [0]
++__flexarr = []
++__flexarr = [1]
++def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
++
++def __attribute__(xyz): return
++
++def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
++
++def __attribute_format_arg__(x): return
++
++
++# Included from bits/wordsize.h
++__WORDSIZE = 32
++__LDBL_COMPAT = 1
++def __LDBL_REDIR_DECL(name): return \
++
++__USE_LARGEFILE = 1
++__USE_LARGEFILE64 = 1
++__USE_EXTERN_INLINES = 1
++__USE_EXTERN_INLINES_IN_LIBC = 1
++
++# Included from gnu/stubs.h
++
++# Included from bits/dlfcn.h
++RTLD_LAZY = 0x00001
++RTLD_NOW = 0x00002
++RTLD_BINDING_MASK = 0x3
++RTLD_NOLOAD = 0x00004
++RTLD_DEEPBIND = 0x00008
++RTLD_GLOBAL = 0x00100
++RTLD_LOCAL = 0
++RTLD_NODELETE = 0x01000
++LM_ID_BASE = 0
++LM_ID_NEWLM = -1
diff --git a/examples/python2.7/patches/platform-lsbrelease.diff b/examples/python2.7/patches/platform-lsbrelease.diff
new file mode 100644
index 0000000..cd6f7d1
--- /dev/null
+++ b/examples/python2.7/patches/platform-lsbrelease.diff
@@ -0,0 +1,41 @@
+# DP: Use /etc/lsb-release to identify the platform.
+
+--- a/Lib/platform.py
++++ b/Lib/platform.py
+@@ -288,6 +288,10 @@
+ id = l[1]
+ return '', version, id
+
++_distributor_id_file_re = re.compile("(?:DISTRIB_ID\s*=)\s*(.*)", re.I)
++_release_file_re = re.compile("(?:DISTRIB_RELEASE\s*=)\s*(.*)", re.I)
++_codename_file_re = re.compile("(?:DISTRIB_CODENAME\s*=)\s*(.*)", re.I)
++
+ def linux_distribution(distname='', version='', id='',
+
+ supported_dists=_supported_dists,
+@@ -312,6 +316,25 @@
+ args given as parameters.
+
+ """
++ # check for the Debian/Ubuntu /etc/lsb-release file first, needed so
++ # that the distribution doesn't get identified as Debian.
++ try:
++ etclsbrel = open("/etc/lsb-release", "rU")
++ for line in etclsbrel:
++ m = _distributor_id_file_re.search(line)
++ if m:
++ _u_distname = m.group(1).strip()
++ m = _release_file_re.search(line)
++ if m:
++ _u_version = m.group(1).strip()
++ m = _codename_file_re.search(line)
++ if m:
++ _u_id = m.group(1).strip()
++ if _u_distname and _u_version:
++ return (_u_distname, _u_version, _u_id)
++ except (EnvironmentError, UnboundLocalError):
++ pass
++
+ try:
+ etc = os.listdir('/etc')
+ except os.error:
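For illustration only, a minimal sketch (not part of the patch) of how the lsb-release parsing added above behaves, assuming an Ubuntu-style /etc/lsb-release; the sample file contents are made up, and the regular expressions are copied from the hunk:

# Illustrative sketch: parse a hypothetical lsb-release file the same way
# the patched linux_distribution() does.
import re

_distributor_id_file_re = re.compile(r"(?:DISTRIB_ID\s*=)\s*(.*)", re.I)
_release_file_re = re.compile(r"(?:DISTRIB_RELEASE\s*=)\s*(.*)", re.I)
_codename_file_re = re.compile(r"(?:DISTRIB_CODENAME\s*=)\s*(.*)", re.I)

sample = """DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=12.04
DISTRIB_CODENAME=precise
"""

distname = version = codename = ''
for line in sample.splitlines():
    m = _distributor_id_file_re.search(line)
    if m:
        distname = m.group(1).strip()
    m = _release_file_re.search(line)
    if m:
        version = m.group(1).strip()
    m = _codename_file_re.search(line)
    if m:
        codename = m.group(1).strip()

result = (distname, version, codename)
print(result)   # ('Ubuntu', '12.04', 'precise')

Because this file is consulted before the /etc directory scan, an Ubuntu system is reported as Ubuntu rather than falling through to the Debian heuristics.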
diff --git a/examples/python2.7/patches/profiled-build.diff b/examples/python2.7/patches/profiled-build.diff
new file mode 100644
index 0000000..0bca60a
--- /dev/null
+++ b/examples/python2.7/patches/profiled-build.diff
@@ -0,0 +1,27 @@
+# DP: Fix profiled build; don't use Python/thread.gc*, gcc complains
+
+--- a/Makefile.pre.in
++++ b/Makefile.pre.in
+@@ -388,18 +388,18 @@
+ $(MAKE) build_all_use_profile
+
+ build_all_generate_profile:
+- $(MAKE) all CFLAGS="$(CFLAGS) -fprofile-generate" LIBS="$(LIBS) -lgcov"
++ $(MAKE) all PY_CFLAGS="$(PY_CFLAGS) -fprofile-generate" LIBS="$(LIBS) -lgcov"
+
+ run_profile_task:
+- ./$(BUILDPYTHON) $(PROFILE_TASK)
++ -./$(BUILDPYTHON) $(PROFILE_TASK)
+
+ build_all_use_profile:
+- $(MAKE) all CFLAGS="$(CFLAGS) -fprofile-use"
++ $(MAKE) all PY_CFLAGS="$(PY_CFLAGS) -fprofile-use -fprofile-correction"
+
+ coverage:
+ @echo "Building with support for coverage checking:"
+ $(MAKE) clean
+- $(MAKE) all CFLAGS="$(CFLAGS) -O0 -pg -fprofile-arcs -ftest-coverage" LIBS="$(LIBS) -lgcov"
++ $(MAKE) all PY_CFLAGS="$(PY_CFLAGS) -O0 -pg -fprofile-arcs -ftest-coverage" LIBS="$(LIBS) -lgcov"
+
+
+ # Build the interpreter
diff --git a/examples/python2.7/patches/pydebug-path.dpatch b/examples/python2.7/patches/pydebug-path.dpatch
new file mode 100644
index 0000000..652ff3b
--- /dev/null
+++ b/examples/python2.7/patches/pydebug-path.dpatch
@@ -0,0 +1,100 @@
+#! /bin/sh -e
+
+# DP: When built with --with-pydebug, add a debug directory
+# DP: <prefix>/lib-dynload/debug to sys.path just before
+# DP: <prefix>/lib-dynload und install the extension modules
+# DP: of the debug build in this directory.
+
+dir=
+if [ $# -eq 3 -a "$2" = '-d' ]; then
+ pdir="-d $3"
+ dir="$3/"
+elif [ $# -ne 1 ]; then
+ echo >&2 "usage: `basename $0`: -patch|-unpatch [-d <srcdir>]"
+ exit 1
+fi
+case "$1" in
+ -patch)
+ patch $pdir -f --no-backup-if-mismatch -p0 < $0
+ ;;
+ -unpatch)
+ patch $pdir -f --no-backup-if-mismatch -R -p0 < $0
+ ;;
+ *)
+ echo >&2 "usage: `basename $0`: -patch|-unpatch [-d <srcdir>]"
+ exit 1
+esac
+exit 0
+
+--- Modules/getpath.c.orig 2005-01-18 00:56:31.571961744 +0100
++++ Modules/getpath.c 2005-01-18 01:02:23.811413208 +0100
+@@ -112,9 +112,14 @@
+ #endif
+
+ #ifndef PYTHONPATH
++#ifdef Py_DEBUG
++#define PYTHONPATH PREFIX "/lib/python" VERSION ":" \
++ EXEC_PREFIX "/lib/python" VERSION "/lib-dynload/debug"
++#else
+ #define PYTHONPATH PREFIX "/lib/python" VERSION ":" \
+ EXEC_PREFIX "/lib/python" VERSION "/lib-dynload"
+ #endif
++#endif
+
+ #ifndef LANDMARK
+ #define LANDMARK "os.py"
+@@ -323,6 +328,9 @@
+ strncpy(exec_prefix, home, MAXPATHLEN);
+ joinpath(exec_prefix, lib_python);
+ joinpath(exec_prefix, "lib-dynload");
++#ifdef Py_DEBUG
++ joinpath(exec_prefix, "debug");
++#endif
+ return 1;
+ }
+
+@@ -340,6 +348,9 @@
+ n = strlen(exec_prefix);
+ joinpath(exec_prefix, lib_python);
+ joinpath(exec_prefix, "lib-dynload");
++#ifdef Py_DEBUG
++ joinpath(exec_prefix, "debug");
++#endif
+ if (isdir(exec_prefix))
+ return 1;
+ exec_prefix[n] = '\0';
+@@ -350,6 +361,9 @@
+ strncpy(exec_prefix, EXEC_PREFIX, MAXPATHLEN);
+ joinpath(exec_prefix, lib_python);
+ joinpath(exec_prefix, "lib-dynload");
++#ifdef Py_DEBUG
++ joinpath(exec_prefix, "debug");
++#endif
+ if (isdir(exec_prefix))
+ return 1;
+
+@@ -654,6 +654,9 @@
+ reduce(exec_prefix);
+ reduce(exec_prefix);
+ reduce(exec_prefix);
++#ifdef Py_DEBUG
++ reduce(exec_prefix);
++#endif
+ if (!exec_prefix[0])
+ strcpy(exec_prefix, separator);
+ }
+--- Lib/site.py~ 2004-12-04 00:39:05.000000000 +0100
++++ Lib/site.py 2005-01-18 01:33:36.589707632 +0100
+@@ -188,6 +188,12 @@
+ "python" + sys.version[:3],
+ "site-packages"),
+ os.path.join(prefix, "lib", "site-python")]
++ try:
++ # sys.getobjects only available in --with-pydebug build
++ sys.getobjects
++ sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
++ except AttributeError:
++ pass
+ else:
+ sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
+ if sys.platform == 'darwin':
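For illustration only, a minimal sketch (not part of the dpatch) of the detection idiom the patched site.py relies on: sys.getobjects is only available in a --with-pydebug build, so probing for it decides whether the extra debug site directory is prepended. The prefix value here is an assumption made up for the sketch:

# Illustrative sketch of the pydebug detection used by the patched site.py.
import os
import sys

prefix = '/usr'  # assumption for the sketch; the real code uses the install prefix
sitedirs = [os.path.join(prefix, 'lib',
                         'python' + sys.version[:3], 'site-packages')]
try:
    # sys.getobjects only available in --with-pydebug build
    sys.getobjects
    sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
except AttributeError:
    pass

print(sitedirs)  # debug dir is listed first only under a pydebug interpreter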
diff --git a/examples/python2.7/patches/revert-issue14635.diff b/examples/python2.7/patches/revert-issue14635.diff
new file mode 100644
index 0000000..47a2bdd
--- /dev/null
+++ b/examples/python2.7/patches/revert-issue14635.diff
@@ -0,0 +1,294 @@
+diff -urN a/Lib/telnetlib.py b/Lib/telnetlib.py
+--- a/Lib/telnetlib.py 2012-12-25 13:41:08.467405725 +0100
++++ b/Lib/telnetlib.py 2012-12-25 14:00:31.339404759 +0100
+@@ -34,7 +34,6 @@
+
+
+ # Imported modules
+-import errno
+ import sys
+ import socket
+ import select
+@@ -206,7 +205,6 @@
+ self.sb = 0 # flag for SB and SE sequence.
+ self.sbdataq = ''
+ self.option_callback = None
+- self._has_poll = hasattr(select, 'poll')
+ if host is not None:
+ self.open(host, port, timeout)
+
+@@ -289,61 +287,6 @@
+ is closed and no cooked data is available.
+
+ """
+- if self._has_poll:
+- return self._read_until_with_poll(match, timeout)
+- else:
+- return self._read_until_with_select(match, timeout)
+-
+- def _read_until_with_poll(self, match, timeout):
+- """Read until a given string is encountered or until timeout.
+-
+- This method uses select.poll() to implement the timeout.
+- """
+- n = len(match)
+- call_timeout = timeout
+- if timeout is not None:
+- from time import time
+- time_start = time()
+- self.process_rawq()
+- i = self.cookedq.find(match)
+- if i < 0:
+- poller = select.poll()
+- poll_in_or_priority_flags = select.POLLIN | select.POLLPRI
+- poller.register(self, poll_in_or_priority_flags)
+- while i < 0 and not self.eof:
+- try:
+- ready = poller.poll(call_timeout)
+- except select.error as e:
+- if e.errno == errno.EINTR:
+- if timeout is not None:
+- elapsed = time() - time_start
+- call_timeout = timeout-elapsed
+- continue
+- raise
+- for fd, mode in ready:
+- if mode & poll_in_or_priority_flags:
+- i = max(0, len(self.cookedq)-n)
+- self.fill_rawq()
+- self.process_rawq()
+- i = self.cookedq.find(match, i)
+- if timeout is not None:
+- elapsed = time() - time_start
+- if elapsed >= timeout:
+- break
+- call_timeout = timeout-elapsed
+- poller.unregister(self)
+- if i >= 0:
+- i = i + n
+- buf = self.cookedq[:i]
+- self.cookedq = self.cookedq[i:]
+- return buf
+- return self.read_very_lazy()
+-
+- def _read_until_with_select(self, match, timeout=None):
+- """Read until a given string is encountered or until timeout.
+-
+- The timeout is implemented using select.select().
+- """
+ n = len(match)
+ self.process_rawq()
+ i = self.cookedq.find(match)
+@@ -646,79 +589,6 @@
+ results are undeterministic, and may depend on the I/O timing.
+
+ """
+- if self._has_poll:
+- return self._expect_with_poll(list, timeout)
+- else:
+- return self._expect_with_select(list, timeout)
+-
+- def _expect_with_poll(self, expect_list, timeout=None):
+- """Read until one from a list of a regular expressions matches.
+-
+- This method uses select.poll() to implement the timeout.
+- """
+- re = None
+- expect_list = expect_list[:]
+- indices = range(len(expect_list))
+- for i in indices:
+- if not hasattr(expect_list[i], "search"):
+- if not re: import re
+- expect_list[i] = re.compile(expect_list[i])
+- call_timeout = timeout
+- if timeout is not None:
+- from time import time
+- time_start = time()
+- self.process_rawq()
+- m = None
+- for i in indices:
+- m = expect_list[i].search(self.cookedq)
+- if m:
+- e = m.end()
+- text = self.cookedq[:e]
+- self.cookedq = self.cookedq[e:]
+- break
+- if not m:
+- poller = select.poll()
+- poll_in_or_priority_flags = select.POLLIN | select.POLLPRI
+- poller.register(self, poll_in_or_priority_flags)
+- while not m and not self.eof:
+- try:
+- ready = poller.poll(call_timeout)
+- except select.error as e:
+- if e.errno == errno.EINTR:
+- if timeout is not None:
+- elapsed = time() - time_start
+- call_timeout = timeout-elapsed
+- continue
+- raise
+- for fd, mode in ready:
+- if mode & poll_in_or_priority_flags:
+- self.fill_rawq()
+- self.process_rawq()
+- for i in indices:
+- m = expect_list[i].search(self.cookedq)
+- if m:
+- e = m.end()
+- text = self.cookedq[:e]
+- self.cookedq = self.cookedq[e:]
+- break
+- if timeout is not None:
+- elapsed = time() - time_start
+- if elapsed >= timeout:
+- break
+- call_timeout = timeout-elapsed
+- poller.unregister(self)
+- if m:
+- return (i, m, text)
+- text = self.read_very_lazy()
+- if not text and self.eof:
+- raise EOFError
+- return (-1, None, text)
+-
+- def _expect_with_select(self, list, timeout=None):
+- """Read until one from a list of a regular expressions matches.
+-
+- The timeout is implemented using select.select().
+- """
+ re = None
+ list = list[:]
+ indices = range(len(list))
+diff -urN a/Lib/test/test_telnetlib.py b/Lib/test/test_telnetlib.py
+--- a/Lib/test/test_telnetlib.py 2012-12-25 13:41:08.499405725 +0100
++++ b/Lib/test/test_telnetlib.py 2012-12-25 14:00:31.339404759 +0100
+@@ -136,28 +136,6 @@
+ self.assertEqual(data, want[0])
+ self.assertEqual(telnet.read_all(), 'not seen')
+
+- def test_read_until_with_poll(self):
+- """Use select.poll() to implement telnet.read_until()."""
+- want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
+- self.dataq.put(want)
+- telnet = telnetlib.Telnet(HOST, self.port)
+- if not telnet._has_poll:
+- raise unittest.SkipTest('select.poll() is required')
+- telnet._has_poll = True
+- self.dataq.join()
+- data = telnet.read_until('match')
+- self.assertEqual(data, ''.join(want[:-2]))
+-
+- def test_read_until_with_select(self):
+- """Use select.select() to implement telnet.read_until()."""
+- want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
+- self.dataq.put(want)
+- telnet = telnetlib.Telnet(HOST, self.port)
+- telnet._has_poll = False
+- self.dataq.join()
+- data = telnet.read_until('match')
+- self.assertEqual(data, ''.join(want[:-2]))
+-
+ def test_read_all_A(self):
+ """
+ read_all()
+@@ -380,75 +358,8 @@
+ self.assertEqual('', telnet.read_sb_data())
+ nego.sb_getter = None # break the nego => telnet cycle
+
+-
+-class ExpectTests(TestCase):
+- def setUp(self):
+- self.evt = threading.Event()
+- self.dataq = Queue.Queue()
+- self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+- self.sock.settimeout(10)
+- self.port = test_support.bind_port(self.sock)
+- self.thread = threading.Thread(target=server, args=(self.evt,self.sock,
+- self.dataq))
+- self.thread.start()
+- self.evt.wait()
+-
+- def tearDown(self):
+- self.thread.join()
+-
+- # use a similar approach to testing timeouts as test_timeout.py
+- # these will never pass 100% but make the fuzz big enough that it is rare
+- block_long = 0.6
+- block_short = 0.3
+- def test_expect_A(self):
+- """
+- expect(expected, [timeout])
+- Read until the expected string has been seen, or a timeout is
+- hit (default is no timeout); may block.
+- """
+- want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
+- self.dataq.put(want)
+- telnet = telnetlib.Telnet(HOST, self.port)
+- self.dataq.join()
+- (_,_,data) = telnet.expect(['match'])
+- self.assertEqual(data, ''.join(want[:-2]))
+-
+- def test_expect_B(self):
+- # test the timeout - it does NOT raise socket.timeout
+- want = ['hello', self.block_long, 'not seen', EOF_sigil]
+- self.dataq.put(want)
+- telnet = telnetlib.Telnet(HOST, self.port)
+- self.dataq.join()
+- (_,_,data) = telnet.expect(['not seen'], self.block_short)
+- self.assertEqual(data, want[0])
+- self.assertEqual(telnet.read_all(), 'not seen')
+-
+- def test_expect_with_poll(self):
+- """Use select.poll() to implement telnet.expect()."""
+- want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
+- self.dataq.put(want)
+- telnet = telnetlib.Telnet(HOST, self.port)
+- if not telnet._has_poll:
+- raise unittest.SkipTest('select.poll() is required')
+- telnet._has_poll = True
+- self.dataq.join()
+- (_,_,data) = telnet.expect(['match'])
+- self.assertEqual(data, ''.join(want[:-2]))
+-
+- def test_expect_with_select(self):
+- """Use select.select() to implement telnet.expect()."""
+- want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
+- self.dataq.put(want)
+- telnet = telnetlib.Telnet(HOST, self.port)
+- telnet._has_poll = False
+- self.dataq.join()
+- (_,_,data) = telnet.expect(['match'])
+- self.assertEqual(data, ''.join(want[:-2]))
+-
+-
+ def test_main(verbose=None):
+- test_support.run_unittest(GeneralTests, ReadTests, OptionTests,
+- ExpectTests)
++ test_support.run_unittest(GeneralTests, ReadTests, OptionTests)
+
+ if __name__ == '__main__':
+ test_main()
+diff -urN a/Misc/ACKS b/Misc/ACKS
+--- a/Misc/ACKS 2012-12-25 13:48:09.675405378 +0100
++++ b/Misc/ACKS 2012-12-25 14:00:31.339404759 +0100
+@@ -372,7 +372,6 @@
+ Albert Hofkamp
+ Tomas Hoger
+ Jonathan Hogg
+-Akintayo Holder
+ Gerrit Holl
+ Shane Holloway
+ Rune Holm
+diff -urN a/Misc/NEWS b/Misc/NEWS
+--- a/Misc/NEWS 2012-12-25 13:48:48.947405345 +0100
++++ b/Misc/NEWS 2012-12-25 14:00:31.347404759 +0100
+@@ -123,9 +123,6 @@
+ - Issue #6493: An issue in ctypes on Windows that caused structure bitfields
+ of type ctypes.c_uint32 and width 32 to incorrectly be set has been fixed.
+
+-- Issue #14635: telnetlib will use poll() rather than select() when possible
+- to avoid failing due to the select() file descriptor limit.
+-
+ - Issue #15247: FileIO now raises an error when given a file descriptor
+ pointing to a directory.
+
diff --git a/examples/python2.7/patches/revert-sizeof-methods.diff b/examples/python2.7/patches/revert-sizeof-methods.diff
new file mode 100644
index 0000000..c0b3735
--- /dev/null
+++ b/examples/python2.7/patches/revert-sizeof-methods.diff
@@ -0,0 +1,1062 @@
+diff -urN a/Include/node.h b/Include/node.h
+--- a/Include/node.h 2012-12-25 13:41:08.431405725 +0100
++++ b/Include/node.h 2012-12-25 13:48:19.839405366 +0100
+@@ -20,9 +20,6 @@
+ PyAPI_FUNC(int) PyNode_AddChild(node *n, int type,
+ char *str, int lineno, int col_offset);
+ PyAPI_FUNC(void) PyNode_Free(node *n);
+-#ifndef Py_LIMITED_API
+-Py_ssize_t _PyNode_SizeOf(node *n);
+-#endif
+
+ /* Node access functions */
+ #define NCH(n) ((n)->n_nchildren)
+diff -urN a/Lib/test/test_array.py b/Lib/test/test_array.py
+--- a/Lib/test/test_array.py 2012-12-25 13:41:08.471405725 +0100
++++ b/Lib/test/test_array.py 2012-12-25 13:48:09.671405378 +0100
+@@ -985,19 +985,6 @@
+ upper = long(pow(2, a.itemsize * 8)) - 1L
+ self.check_overflow(lower, upper)
+
+- @test_support.cpython_only
+- def test_sizeof_with_buffer(self):
+- a = array.array(self.typecode, self.example)
+- basesize = test_support.calcvobjsize('4P')
+- buffer_size = a.buffer_info()[1] * a.itemsize
+- test_support.check_sizeof(self, a, basesize + buffer_size)
+-
+- @test_support.cpython_only
+- def test_sizeof_without_buffer(self):
+- a = array.array(self.typecode)
+- basesize = test_support.calcvobjsize('4P')
+- test_support.check_sizeof(self, a, basesize)
+-
+
+ class ByteTest(SignedNumberTest):
+ typecode = 'b'
+diff -urN a/Lib/test/test_deque.py b/Lib/test/test_deque.py
+--- a/Lib/test/test_deque.py 2012-12-25 13:41:08.471405725 +0100
++++ b/Lib/test/test_deque.py 2012-12-25 13:48:19.843405367 +0100
+@@ -6,7 +6,6 @@
+ import copy
+ import cPickle as pickle
+ import random
+-import struct
+
+ BIG = 100000
+
+@@ -518,21 +517,6 @@
+ gc.collect()
+ self.assertTrue(ref() is None, "Cycle was not collected")
+
+- check_sizeof = test_support.check_sizeof
+-
+- @test_support.cpython_only
+- def test_sizeof(self):
+- BLOCKLEN = 62
+- basesize = test_support.calcobjsize('2P4PlP')
+- blocksize = struct.calcsize('2P%dP' % BLOCKLEN)
+- self.assertEqual(object.__sizeof__(deque()), basesize)
+- check = self.check_sizeof
+- check(deque(), basesize + blocksize)
+- check(deque('a'), basesize + blocksize)
+- check(deque('a' * (BLOCKLEN // 2)), basesize + blocksize)
+- check(deque('a' * (BLOCKLEN // 2 + 1)), basesize + 2 * blocksize)
+- check(deque('a' * (42 * BLOCKLEN)), basesize + 43 * blocksize)
+-
+ class TestVariousIteratorArgs(unittest.TestCase):
+
+ def test_constructor(self):
+diff -urN a/Lib/test/test_io.py b/Lib/test/test_io.py
+--- a/Lib/test/test_io.py 2012-12-25 13:41:08.487405725 +0100
++++ b/Lib/test/test_io.py 2012-12-25 13:48:30.887405360 +0100
+@@ -748,20 +748,6 @@
+ buf.raw = x
+
+
+-class SizeofTest:
+-
+- @support.cpython_only
+- def test_sizeof(self):
+- bufsize1 = 4096
+- bufsize2 = 8192
+- rawio = self.MockRawIO()
+- bufio = self.tp(rawio, buffer_size=bufsize1)
+- size = sys.getsizeof(bufio) - bufsize1
+- rawio = self.MockRawIO()
+- bufio = self.tp(rawio, buffer_size=bufsize2)
+- self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
+-
+-
+ class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
+ read_mode = "rb"
+
+@@ -945,7 +931,7 @@
+ "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
+
+
+-class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
++class CBufferedReaderTest(BufferedReaderTest):
+ tp = io.BufferedReader
+
+ def test_constructor(self):
+@@ -1208,7 +1194,7 @@
+ self.tp(self.MockRawIO(), 8, 12)
+
+
+-class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
++class CBufferedWriterTest(BufferedWriterTest):
+ tp = io.BufferedWriter
+
+ def test_constructor(self):
+@@ -1596,8 +1582,8 @@
+ f.flush()
+ self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
+
+-class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest,
+- BufferedRandomTest, SizeofTest):
++
++class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest, BufferedRandomTest):
+ tp = io.BufferedRandom
+
+ def test_constructor(self):
+diff -urN a/Lib/test/test_memoryio.py b/Lib/test/test_memoryio.py
+--- a/Lib/test/test_memoryio.py 2012-12-25 13:41:08.487405725 +0100
++++ b/Lib/test/test_memoryio.py 2012-12-25 13:48:30.891405359 +0100
+@@ -638,17 +638,6 @@
+ memio.close()
+ self.assertRaises(ValueError, memio.__setstate__, (b"closed", 0, None))
+
+- check_sizeof = support.check_sizeof
+-
+- @support.cpython_only
+- def test_sizeof(self):
+- basesize = support.calcobjsize(b'P2PP2P')
+- check = self.check_sizeof
+- self.assertEqual(object.__sizeof__(io.BytesIO()), basesize)
+- check(io.BytesIO(), basesize )
+- check(io.BytesIO(b'a'), basesize + 1 + 1 )
+- check(io.BytesIO(b'a' * 1000), basesize + 1000 + 1 )
+-
+
+ class CStringIOTest(PyStringIOTest):
+ ioclass = io.StringIO
+diff -urN a/Lib/test/test_parser.py b/Lib/test/test_parser.py
+--- a/Lib/test/test_parser.py 2012-12-25 13:41:08.487405725 +0100
++++ b/Lib/test/test_parser.py 2012-12-25 13:48:19.847405368 +0100
+@@ -1,8 +1,7 @@
+ import parser
+ import unittest
+ import sys
+-import struct
+-from test import test_support as support
++from test import test_support
+
+ #
+ # First, we test that we can generate trees from valid source fragments,
+@@ -584,57 +583,12 @@
+ print >>sys.stderr, "Expecting 's_push: parser stack overflow' in next line"
+ self.assertRaises(MemoryError, parser.expr, e)
+
+-class STObjectTestCase(unittest.TestCase):
+- """Test operations on ST objects themselves"""
+-
+- check_sizeof = support.check_sizeof
+-
+- @support.cpython_only
+- def test_sizeof(self):
+- def XXXROUNDUP(n):
+- if n <= 1:
+- return n
+- if n <= 128:
+- return (n + 3) & ~3
+- return 1 << (n - 1).bit_length()
+-
+- basesize = support.calcobjsize('Pii')
+- nodesize = struct.calcsize('hP3iP0h')
+- def sizeofchildren(node):
+- if node is None:
+- return 0
+- res = 0
+- hasstr = len(node) > 1 and isinstance(node[-1], str)
+- if hasstr:
+- res += len(node[-1]) + 1
+- children = node[1:-1] if hasstr else node[1:]
+- if children:
+- res += XXXROUNDUP(len(children)) * nodesize
+- for child in children:
+- res += sizeofchildren(child)
+- return res
+-
+- def check_st_sizeof(st):
+- self.check_sizeof(st, basesize + nodesize +
+- sizeofchildren(st.totuple()))
+-
+- check_st_sizeof(parser.expr('2 + 3'))
+- check_st_sizeof(parser.expr('2 + 3 + 4'))
+- check_st_sizeof(parser.suite('x = 2 + 3'))
+- check_st_sizeof(parser.suite(''))
+- check_st_sizeof(parser.suite('# -*- coding: utf-8 -*-'))
+- check_st_sizeof(parser.expr('[' + '2,' * 1000 + ']'))
+-
+-
+- # XXX tests for pickling and unpickling of ST objects should go here
+-
+ def test_main():
+- support.run_unittest(
++ test_support.run_unittest(
+ RoundtripLegalSyntaxTestCase,
+ IllegalSyntaxTestCase,
+ CompileTestCase,
+ ParserStackLimitTestCase,
+- STObjectTestCase,
+ )
+
+
+diff -urN a/Lib/test/test_struct.py b/Lib/test/test_struct.py
+--- a/Lib/test/test_struct.py 2012-12-25 13:41:08.495405725 +0100
++++ b/Lib/test/test_struct.py 2012-12-25 13:48:48.943405345 +0100
+@@ -3,8 +3,7 @@
+ import unittest
+ import struct
+ import inspect
+-from test import test_support as support
+-from test.test_support import (check_warnings, check_py3k_warnings)
++from test.test_support import run_unittest, check_warnings, check_py3k_warnings
+
+ import sys
+ ISBIGENDIAN = sys.byteorder == "big"
+@@ -545,29 +544,8 @@
+ hugecount2 = '{}b{}H'.format(sys.maxsize//2, sys.maxsize//2)
+ self.assertRaises(struct.error, struct.calcsize, hugecount2)
+
+- def check_sizeof(self, format_str, number_of_codes):
+- # The size of 'PyStructObject'
+- totalsize = support.calcobjsize('5P')
+- # The size taken up by the 'formatcode' dynamic array
+- totalsize += struct.calcsize('3P') * (number_of_codes + 1)
+- support.check_sizeof(self, struct.Struct(format_str), totalsize)
+-
+- @support.cpython_only
+- def test__sizeof__(self):
+- for code in integer_codes:
+- self.check_sizeof(code, 1)
+- self.check_sizeof('BHILfdspP', 9)
+- self.check_sizeof('B' * 1234, 1234)
+- self.check_sizeof('fd', 2)
+- self.check_sizeof('xxxxxxxxxxxxxx', 0)
+- self.check_sizeof('100H', 100)
+- self.check_sizeof('187s', 1)
+- self.check_sizeof('20p', 1)
+- self.check_sizeof('0s', 1)
+- self.check_sizeof('0c', 0)
+-
+ def test_main():
+- support.run_unittest(StructTest)
++ run_unittest(StructTest)
+
+ if __name__ == '__main__':
+ test_main()
+diff -urN a/Lib/test/test_support.py b/Lib/test/test_support.py
+--- a/Lib/test/test_support.py 2012-12-25 13:41:08.495405725 +0100
++++ b/Lib/test/test_support.py 2012-12-25 13:48:30.891405359 +0100
+@@ -18,8 +18,6 @@
+ import UserDict
+ import re
+ import time
+-import struct
+-import _testcapi
+ try:
+ import thread
+ except ImportError:
+@@ -927,32 +925,6 @@
+ gc.collect()
+
+
+-_header = '2P'
+-if hasattr(sys, "gettotalrefcount"):
+- _header = '2P' + _header
+-_vheader = _header + 'P'
+-
+-def calcobjsize(fmt):
+- return struct.calcsize(_header + fmt + '0P')
+-
+-def calcvobjsize(fmt):
+- return struct.calcsize(_vheader + fmt + '0P')
+-
+-
+-_TPFLAGS_HAVE_GC = 1<<14
+-_TPFLAGS_HEAPTYPE = 1<<9
+-
+-def check_sizeof(test, o, size):
+- result = sys.getsizeof(o)
+- # add GC header size
+- if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
+- ((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
+- size += _testcapi.SIZEOF_PYGC_HEAD
+- msg = 'wrong size for %s: got %d, expected %d' \
+- % (type(o), result, size)
+- test.assertEqual(result, size, msg)
+-
+-
+ #=======================================================================
+ # Decorator for running a function in a different locale, correctly resetting
+ # it afterwards.
+diff -urN a/Lib/test/test_sys.py b/Lib/test/test_sys.py
+--- a/Lib/test/test_sys.py 2012-12-25 13:41:08.499405725 +0100
++++ b/Lib/test/test_sys.py 2012-12-25 13:48:30.895405358 +0100
+@@ -490,8 +490,22 @@
+
+ class SizeofTest(unittest.TestCase):
+
++ TPFLAGS_HAVE_GC = 1<<14
++ TPFLAGS_HEAPTYPE = 1L<<9
++
+ def setUp(self):
+- self.P = struct.calcsize('P')
++ self.c = len(struct.pack('c', ' '))
++ self.H = len(struct.pack('H', 0))
++ self.i = len(struct.pack('i', 0))
++ self.l = len(struct.pack('l', 0))
++ self.P = len(struct.pack('P', 0))
++ # due to missing size_t information from struct, it is assumed that
++ # sizeof(Py_ssize_t) = sizeof(void*)
++ self.header = 'PP'
++ self.vheader = self.header + 'P'
++ if hasattr(sys, "gettotalrefcount"):
++ self.header += '2P'
++ self.vheader += '2P'
+ self.longdigit = sys.long_info.sizeof_digit
+ import _testcapi
+ self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
+@@ -501,109 +515,128 @@
+ self.file.close()
+ test.test_support.unlink(test.test_support.TESTFN)
+
+- check_sizeof = test.test_support.check_sizeof
++ def check_sizeof(self, o, size):
++ result = sys.getsizeof(o)
++ if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\
++ ((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))):
++ size += self.gc_headsize
++ msg = 'wrong size for %s: got %d, expected %d' \
++ % (type(o), result, size)
++ self.assertEqual(result, size, msg)
++
++ def calcsize(self, fmt):
++ """Wrapper around struct.calcsize which enforces the alignment of the
++ end of a structure to the alignment requirement of pointer.
++
++ Note: This wrapper should only be used if a pointer member is included
++ and no member with a size larger than a pointer exists.
++ """
++ return struct.calcsize(fmt + '0P')
+
+ def test_gc_head_size(self):
+ # Check that the gc header size is added to objects tracked by the gc.
+- size = test.test_support.calcobjsize
++ h = self.header
++ size = self.calcsize
+ gc_header_size = self.gc_headsize
+ # bool objects are not gc tracked
+- self.assertEqual(sys.getsizeof(True), size('l'))
++ self.assertEqual(sys.getsizeof(True), size(h + 'l'))
+ # but lists are
+- self.assertEqual(sys.getsizeof([]), size('P PP') + gc_header_size)
++ self.assertEqual(sys.getsizeof([]), size(h + 'P PP') + gc_header_size)
+
+ def test_default(self):
+- size = test.test_support.calcobjsize
+- self.assertEqual(sys.getsizeof(True, -1), size('l'))
++ h = self.header
++ size = self.calcsize
++ self.assertEqual(sys.getsizeof(True, -1), size(h + 'l'))
+
+ def test_objecttypes(self):
+ # check all types defined in Objects/
+- size = test.test_support.calcobjsize
+- vsize = test.test_support.calcvobjsize
++ h = self.header
++ vh = self.vheader
++ size = self.calcsize
+ check = self.check_sizeof
+ # bool
+- check(True, size('l'))
++ check(True, size(h + 'l'))
+ # buffer
+ with test.test_support.check_py3k_warnings():
+- check(buffer(''), size('2P2Pil'))
++ check(buffer(''), size(h + '2P2Pil'))
+ # builtin_function_or_method
+- check(len, size('3P'))
++ check(len, size(h + '3P'))
+ # bytearray
+ samples = ['', 'u'*100000]
+ for sample in samples:
+ x = bytearray(sample)
+- check(x, vsize('iPP') + x.__alloc__())
++ check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
+ # bytearray_iterator
+- check(iter(bytearray()), size('PP'))
++ check(iter(bytearray()), size(h + 'PP'))
+ # cell
+ def get_cell():
+ x = 42
+ def inner():
+ return x
+ return inner
+- check(get_cell().func_closure[0], size('P'))
++ check(get_cell().func_closure[0], size(h + 'P'))
+ # classobj (old-style class)
+ class class_oldstyle():
+ def method():
+ pass
+- check(class_oldstyle, size('7P'))
++ check(class_oldstyle, size(h + '7P'))
+ # instance (old-style class)
+- check(class_oldstyle(), size('3P'))
++ check(class_oldstyle(), size(h + '3P'))
+ # instancemethod (old-style class)
+- check(class_oldstyle().method, size('4P'))
++ check(class_oldstyle().method, size(h + '4P'))
+ # complex
+- check(complex(0,1), size('2d'))
++ check(complex(0,1), size(h + '2d'))
+ # code
+- check(get_cell().func_code, size('4i8Pi3P'))
++ check(get_cell().func_code, size(h + '4i8Pi3P'))
+ # BaseException
+- check(BaseException(), size('3P'))
++ check(BaseException(), size(h + '3P'))
+ # UnicodeEncodeError
+- check(UnicodeEncodeError("", u"", 0, 0, ""), size('5P2PP'))
++ check(UnicodeEncodeError("", u"", 0, 0, ""), size(h + '5P2PP'))
+ # UnicodeDecodeError
+- check(UnicodeDecodeError("", "", 0, 0, ""), size('5P2PP'))
++ check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
+ # UnicodeTranslateError
+- check(UnicodeTranslateError(u"", 0, 1, ""), size('5P2PP'))
++ check(UnicodeTranslateError(u"", 0, 1, ""), size(h + '5P2PP'))
+ # method_descriptor (descriptor object)
+- check(str.lower, size('2PP'))
++ check(str.lower, size(h + '2PP'))
+ # classmethod_descriptor (descriptor object)
+ # XXX
+ # member_descriptor (descriptor object)
+ import datetime
+- check(datetime.timedelta.days, size('2PP'))
++ check(datetime.timedelta.days, size(h + '2PP'))
+ # getset_descriptor (descriptor object)
+ import __builtin__
+- check(__builtin__.file.closed, size('2PP'))
++ check(__builtin__.file.closed, size(h + '2PP'))
+ # wrapper_descriptor (descriptor object)
+- check(int.__add__, size('2P2P'))
++ check(int.__add__, size(h + '2P2P'))
+ # dictproxy
+ class C(object): pass
+- check(C.__dict__, size('P'))
++ check(C.__dict__, size(h + 'P'))
+ # method-wrapper (descriptor object)
+- check({}.__iter__, size('2P'))
++ check({}.__iter__, size(h + '2P'))
+ # dict
+- check({}, size('3P2P' + 8*'P2P'))
++ check({}, size(h + '3P2P' + 8*'P2P'))
+ x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
+- check(x, size('3P2P' + 8*'P2P') + 16*struct.calcsize('P2P'))
++ check(x, size(h + '3P2P' + 8*'P2P') + 16*size('P2P'))
+ # dictionary-keyiterator
+- check({}.iterkeys(), size('P2PPP'))
++ check({}.iterkeys(), size(h + 'P2PPP'))
+ # dictionary-valueiterator
+- check({}.itervalues(), size('P2PPP'))
++ check({}.itervalues(), size(h + 'P2PPP'))
+ # dictionary-itemiterator
+- check({}.iteritems(), size('P2PPP'))
++ check({}.iteritems(), size(h + 'P2PPP'))
+ # ellipses
+- check(Ellipsis, size(''))
++ check(Ellipsis, size(h + ''))
+ # EncodingMap
+ import codecs, encodings.iso8859_3
+ x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
+- check(x, size('32B2iB'))
++ check(x, size(h + '32B2iB'))
+ # enumerate
+- check(enumerate([]), size('l3P'))
++ check(enumerate([]), size(h + 'l3P'))
+ # file
+- check(self.file, size('4P2i4P3i3P3i'))
++ check(self.file, size(h + '4P2i4P3i3P3i'))
+ # float
+- check(float(0), size('d'))
++ check(float(0), size(h + 'd'))
+ # sys.floatinfo
+- check(sys.float_info, vsize('') + self.P * len(sys.float_info))
++ check(sys.float_info, size(vh) + self.P * len(sys.float_info))
+ # frame
+ import inspect
+ CO_MAXBLOCKS = 20
+@@ -612,10 +645,10 @@
+ nfrees = len(x.f_code.co_freevars)
+ extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
+ ncells + nfrees - 1
+- check(x, vsize('12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
++ check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
+ # function
+ def func(): pass
+- check(func, size('9P'))
++ check(func, size(h + '9P'))
+ class c():
+ @staticmethod
+ def foo():
+@@ -624,65 +657,65 @@
+ def bar(cls):
+ pass
+ # staticmethod
+- check(foo, size('P'))
++ check(foo, size(h + 'P'))
+ # classmethod
+- check(bar, size('P'))
++ check(bar, size(h + 'P'))
+ # generator
+ def get_gen(): yield 1
+- check(get_gen(), size('Pi2P'))
++ check(get_gen(), size(h + 'Pi2P'))
+ # integer
+- check(1, size('l'))
+- check(100, size('l'))
++ check(1, size(h + 'l'))
++ check(100, size(h + 'l'))
+ # iterator
+- check(iter('abc'), size('lP'))
++ check(iter('abc'), size(h + 'lP'))
+ # callable-iterator
+ import re
+- check(re.finditer('',''), size('2P'))
++ check(re.finditer('',''), size(h + '2P'))
+ # list
+ samples = [[], [1,2,3], ['1', '2', '3']]
+ for sample in samples:
+- check(sample, vsize('PP') + len(sample)*self.P)
++ check(sample, size(vh + 'PP') + len(sample)*self.P)
+ # sortwrapper (list)
+ # XXX
+ # cmpwrapper (list)
+ # XXX
+ # listiterator (list)
+- check(iter([]), size('lP'))
++ check(iter([]), size(h + 'lP'))
+ # listreverseiterator (list)
+- check(reversed([]), size('lP'))
++ check(reversed([]), size(h + 'lP'))
+ # long
+- check(0L, vsize(''))
+- check(1L, vsize('') + self.longdigit)
+- check(-1L, vsize('') + self.longdigit)
++ check(0L, size(vh))
++ check(1L, size(vh) + self.longdigit)
++ check(-1L, size(vh) + self.longdigit)
+ PyLong_BASE = 2**sys.long_info.bits_per_digit
+- check(long(PyLong_BASE), vsize('') + 2*self.longdigit)
+- check(long(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
+- check(long(PyLong_BASE**2), vsize('') + 3*self.longdigit)
++ check(long(PyLong_BASE), size(vh) + 2*self.longdigit)
++ check(long(PyLong_BASE**2-1), size(vh) + 2*self.longdigit)
++ check(long(PyLong_BASE**2), size(vh) + 3*self.longdigit)
+ # module
+- check(unittest, size('P'))
++ check(unittest, size(h + 'P'))
+ # None
+- check(None, size(''))
++ check(None, size(h + ''))
+ # object
+- check(object(), size(''))
++ check(object(), size(h + ''))
+ # property (descriptor object)
+ class C(object):
+ def getx(self): return self.__x
+ def setx(self, value): self.__x = value
+ def delx(self): del self.__x
+ x = property(getx, setx, delx, "")
+- check(x, size('4Pi'))
++ check(x, size(h + '4Pi'))
+ # PyCObject
+ # PyCapsule
+ # XXX
+ # rangeiterator
+- check(iter(xrange(1)), size('4l'))
++ check(iter(xrange(1)), size(h + '4l'))
+ # reverse
+- check(reversed(''), size('PP'))
++ check(reversed(''), size(h + 'PP'))
+ # set
+ # frozenset
+ PySet_MINSIZE = 8
+ samples = [[], range(10), range(50)]
+- s = size('3P2P' + PySet_MINSIZE*'lP' + 'lP')
++ s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP')
+ for sample in samples:
+ minused = len(sample)
+ if minused == 0: tmp = 1
+@@ -699,24 +732,23 @@
+ check(set(sample), s + newsize*struct.calcsize('lP'))
+ check(frozenset(sample), s + newsize*struct.calcsize('lP'))
+ # setiterator
+- check(iter(set()), size('P3P'))
++ check(iter(set()), size(h + 'P3P'))
+ # slice
+- check(slice(1), size('3P'))
++ check(slice(1), size(h + '3P'))
+ # str
+- vh = test.test_support._vheader
+- check('', struct.calcsize(vh + 'lic'))
+- check('abc', struct.calcsize(vh + 'lic') + 3)
++ check('', struct.calcsize(vh + 'li') + 1)
++ check('abc', struct.calcsize(vh + 'li') + 1 + 3*self.c)
+ # super
+- check(super(int), size('3P'))
++ check(super(int), size(h + '3P'))
+ # tuple
+- check((), vsize(''))
+- check((1,2,3), vsize('') + 3*self.P)
++ check((), size(vh))
++ check((1,2,3), size(vh) + 3*self.P)
+ # tupleiterator
+- check(iter(()), size('lP'))
++ check(iter(()), size(h + 'lP'))
+ # type
+ # (PyTypeObject + PyNumberMethods + PyMappingMethods +
+ # PySequenceMethods + PyBufferProcs)
+- s = vsize('P2P15Pl4PP9PP11PI') + struct.calcsize('41P 10P 3P 6P')
++ s = size(vh + 'P2P15Pl4PP9PP11PI') + size('41P 10P 3P 6P')
+ class newstyleclass(object):
+ pass
+ check(newstyleclass, s)
+@@ -731,40 +763,41 @@
+ # we need to test for both sizes, because we don't know if the string
+ # has been cached
+ for s in samples:
+- check(s, size('PPlP') + usize * (len(s) + 1))
++ check(s, size(h + 'PPlP') + usize * (len(s) + 1))
+ # weakref
+ import weakref
+- check(weakref.ref(int), size('2Pl2P'))
++ check(weakref.ref(int), size(h + '2Pl2P'))
+ # weakproxy
+ # XXX
+ # weakcallableproxy
+- check(weakref.proxy(int), size('2Pl2P'))
++ check(weakref.proxy(int), size(h + '2Pl2P'))
+ # xrange
+- check(xrange(1), size('3l'))
+- check(xrange(66000), size('3l'))
++ check(xrange(1), size(h + '3l'))
++ check(xrange(66000), size(h + '3l'))
+
+ def test_pythontypes(self):
+ # check all types defined in Python/
+- size = test.test_support.calcobjsize
+- vsize = test.test_support.calcvobjsize
++ h = self.header
++ vh = self.vheader
++ size = self.calcsize
+ check = self.check_sizeof
+ # _ast.AST
+ import _ast
+- check(_ast.AST(), size(''))
++ check(_ast.AST(), size(h + ''))
+ # imp.NullImporter
+ import imp
+- check(imp.NullImporter(self.file.name), size(''))
++ check(imp.NullImporter(self.file.name), size(h + ''))
+ try:
+ raise TypeError
+ except TypeError:
+ tb = sys.exc_info()[2]
+ # traceback
+ if tb != None:
+- check(tb, size('2P2i'))
++ check(tb, size(h + '2P2i'))
+ # symtable entry
+ # XXX
+ # sys.flags
+- check(sys.flags, vsize('') + self.P * len(sys.flags))
++ check(sys.flags, size(vh) + self.P * len(sys.flags))
+
+
+ def test_main():
+diff -urN a/Misc/ACKS b/Misc/ACKS
+--- a/Misc/ACKS 2012-12-25 13:41:08.507405725 +0100
++++ b/Misc/ACKS 2012-12-25 13:48:09.675405378 +0100
+@@ -394,7 +394,6 @@
+ Greg Humphreys
+ Eric Huss
+ Jeremy Hylton
+-Ludwig Hähne
+ Gerhard Häring
+ Fredrik Håård
+ Catalin Iacob
+diff -urN a/Misc/NEWS b/Misc/NEWS
+--- a/Misc/NEWS 2012-12-25 13:41:08.511405725 +0100
++++ b/Misc/NEWS 2012-12-25 13:48:48.947405345 +0100
+@@ -100,9 +100,6 @@
+
+ - Issue #15567: Fix NameError when running threading._test
+
+-- Issue #15424: Add a __sizeof__ implementation for array objects.
+- Patch by Ludwig Hähne.
+-
+ - Issue #13052: Fix IDLE crashing when replace string in Search/Replace dialog
+ ended with '\'. Patch by Roger Serwy.
+
+@@ -115,22 +112,6 @@
+ - Issue #12288: Consider '0' and '0.0' as valid initialvalue
+ for tkinter SimpleDialog.
+
+-- Issue #15489: Add a __sizeof__ implementation for BytesIO objects.
+- Patch by Serhiy Storchaka.
+-
+-- Issue #15469: Add a __sizeof__ implementation for deque objects.
+- Patch by Serhiy Storchaka.
+-
+-- Issue #15487: Add a __sizeof__ implementation for buffered I/O objects.
+- Patch by Serhiy Storchaka.
+-
+-- Issue #15512: Add a __sizeof__ implementation for parser.
+- Patch by Serhiy Storchaka.
+-
+-- Issue #15402: An issue in the struct module that caused sys.getsizeof to
+- return incorrect results for struct.Struct instances has been fixed.
+- Initial patch by Serhiy Storchaka.
+-
+ - Issue #15232: when mangle_from is True, email.Generator now correctly mangles
+ lines that start with 'From ' that occur in a MIME preamble or epilog.
+
+@@ -934,9 +915,6 @@
+ Tests
+ -----
+
+-- Issue #15467: Move helpers for __sizeof__ tests into test_support.
+- Patch by Serhiy Storchaka.
+-
+ - Issue #11689: Fix a variable scoping error in an sqlite3 test.
+ Initial patch by Torsten Landschoff.
+
+diff -urN a/Modules/arraymodule.c b/Modules/arraymodule.c
+--- a/Modules/arraymodule.c 2012-12-25 13:41:08.519405725 +0100
++++ b/Modules/arraymodule.c 2012-12-25 13:48:09.683405376 +0100
+@@ -1533,19 +1533,6 @@
+ PyDoc_STRVAR(reduce_doc, "Return state information for pickling.");
+
+ static PyObject *
+-array_sizeof(arrayobject *self, PyObject *unused)
+-{
+- Py_ssize_t res;
+- res = sizeof(arrayobject) + self->allocated * self->ob_descr->itemsize;
+- return PyLong_FromSsize_t(res);
+-}
+-
+-PyDoc_STRVAR(sizeof_doc,
+-"__sizeof__() -> int\n\
+-\n\
+-Size of the array in memory, in bytes.");
+-
+-static PyObject *
+ array_get_typecode(arrayobject *a, void *closure)
+ {
+ char tc = a->ob_descr->typecode;
+@@ -1619,8 +1606,6 @@
+ #endif
+ {"write", (PyCFunction)array_tofile_as_write, METH_O,
+ tofile_doc},
+- {"__sizeof__", (PyCFunction)array_sizeof, METH_NOARGS,
+- sizeof_doc},
+ {NULL, NULL} /* sentinel */
+ };
+
+diff -urN a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c
+--- a/Modules/_collectionsmodule.c 2012-12-25 13:41:08.511405725 +0100
++++ b/Modules/_collectionsmodule.c 2012-12-25 13:48:19.855405367 +0100
+@@ -991,23 +991,6 @@
+ }
+
+ static PyObject *
+-deque_sizeof(dequeobject *deque, void *unused)
+-{
+- Py_ssize_t res;
+- Py_ssize_t blocks;
+-
+- res = sizeof(dequeobject);
+- blocks = (deque->leftindex + deque->len + BLOCKLEN - 1) / BLOCKLEN;
+- assert(deque->leftindex + deque->len - 1 ==
+- (blocks - 1) * BLOCKLEN + deque->rightindex);
+- res += blocks * sizeof(block);
+- return PyLong_FromSsize_t(res);
+-}
+-
+-PyDoc_STRVAR(sizeof_doc,
+-"D.__sizeof__() -- size of D in memory, in bytes");
+-
+-static PyObject *
+ deque_get_maxlen(dequeobject *deque)
+ {
+ if (deque->maxlen == -1)
+@@ -1070,9 +1053,7 @@
+ {"reverse", (PyCFunction)deque_reverse,
+ METH_NOARGS, reverse_doc},
+ {"rotate", (PyCFunction)deque_rotate,
+- METH_VARARGS, rotate_doc},
+- {"__sizeof__", (PyCFunction)deque_sizeof,
+- METH_NOARGS, sizeof_doc},
++ METH_VARARGS, rotate_doc},
+ {NULL, NULL} /* sentinel */
+ };
+
+diff -urN a/Modules/_io/bufferedio.c b/Modules/_io/bufferedio.c
+--- a/Modules/_io/bufferedio.c 2012-12-25 13:41:08.515405725 +0100
++++ b/Modules/_io/bufferedio.c 2012-12-25 13:48:30.903405359 +0100
+@@ -386,17 +386,6 @@
+ Py_TYPE(self)->tp_free((PyObject *)self);
+ }
+
+-static PyObject *
+-buffered_sizeof(buffered *self, void *unused)
+-{
+- Py_ssize_t res;
+-
+- res = sizeof(buffered);
+- if (self->buffer)
+- res += self->buffer_size;
+- return PyLong_FromSsize_t(res);
+-}
+-
+ static int
+ buffered_traverse(buffered *self, visitproc visit, void *arg)
+ {
+@@ -1571,7 +1560,6 @@
+ {"seek", (PyCFunction)buffered_seek, METH_VARARGS},
+ {"tell", (PyCFunction)buffered_tell, METH_NOARGS},
+ {"truncate", (PyCFunction)buffered_truncate, METH_VARARGS},
+- {"__sizeof__", (PyCFunction)buffered_sizeof, METH_NOARGS},
+ {NULL, NULL}
+ };
+
+@@ -1964,7 +1952,6 @@
+ {"flush", (PyCFunction)buffered_flush, METH_NOARGS},
+ {"seek", (PyCFunction)buffered_seek, METH_VARARGS},
+ {"tell", (PyCFunction)buffered_tell, METH_NOARGS},
+- {"__sizeof__", (PyCFunction)buffered_sizeof, METH_NOARGS},
+ {NULL, NULL}
+ };
+
+@@ -2360,7 +2347,6 @@
+ {"readline", (PyCFunction)buffered_readline, METH_VARARGS},
+ {"peek", (PyCFunction)buffered_peek, METH_VARARGS},
+ {"write", (PyCFunction)bufferedwriter_write, METH_VARARGS},
+- {"__sizeof__", (PyCFunction)buffered_sizeof, METH_NOARGS},
+ {NULL, NULL}
+ };
+
+diff -urN a/Modules/_io/bytesio.c b/Modules/_io/bytesio.c
+--- a/Modules/_io/bytesio.c 2012-12-25 13:41:08.515405725 +0100
++++ b/Modules/_io/bytesio.c 2012-12-25 13:48:30.903405359 +0100
+@@ -794,17 +794,6 @@
+ return 0;
+ }
+
+-static PyObject *
+-bytesio_sizeof(bytesio *self, void *unused)
+-{
+- Py_ssize_t res;
+-
+- res = sizeof(bytesio);
+- if (self->buf)
+- res += self->buf_size;
+- return PyLong_FromSsize_t(res);
+-}
+-
+ static int
+ bytesio_traverse(bytesio *self, visitproc visit, void *arg)
+ {
+@@ -846,7 +835,6 @@
+ {"truncate", (PyCFunction)bytesio_truncate, METH_VARARGS, truncate_doc},
+ {"__getstate__", (PyCFunction)bytesio_getstate, METH_NOARGS, NULL},
+ {"__setstate__", (PyCFunction)bytesio_setstate, METH_O, NULL},
+- {"__sizeof__", (PyCFunction)bytesio_sizeof, METH_NOARGS, NULL},
+ {NULL, NULL} /* sentinel */
+ };
+
+diff -urN a/Modules/parsermodule.c b/Modules/parsermodule.c
+--- a/Modules/parsermodule.c 2012-12-25 13:41:08.523405725 +0100
++++ b/Modules/parsermodule.c 2012-12-25 13:48:19.859405367 +0100
+@@ -169,33 +169,9 @@
+
+
+ static void parser_free(PyST_Object *st);
+-static PyObject* parser_sizeof(PyST_Object *, void *);
+ static int parser_compare(PyST_Object *left, PyST_Object *right);
+ static PyObject *parser_getattr(PyObject *self, char *name);
+-static PyObject* parser_compilest(PyST_Object *, PyObject *, PyObject *);
+-static PyObject* parser_isexpr(PyST_Object *, PyObject *, PyObject *);
+-static PyObject* parser_issuite(PyST_Object *, PyObject *, PyObject *);
+-static PyObject* parser_st2list(PyST_Object *, PyObject *, PyObject *);
+-static PyObject* parser_st2tuple(PyST_Object *, PyObject *, PyObject *);
+
+-#define PUBLIC_METHOD_TYPE (METH_VARARGS|METH_KEYWORDS)
+-
+-static PyMethodDef
+-parser_methods[] = {
+- {"compile", (PyCFunction)parser_compilest, PUBLIC_METHOD_TYPE,
+- PyDoc_STR("Compile this ST object into a code object.")},
+- {"isexpr", (PyCFunction)parser_isexpr, PUBLIC_METHOD_TYPE,
+- PyDoc_STR("Determines if this ST object was created from an expression.")},
+- {"issuite", (PyCFunction)parser_issuite, PUBLIC_METHOD_TYPE,
+- PyDoc_STR("Determines if this ST object was created from a suite.")},
+- {"tolist", (PyCFunction)parser_st2list, PUBLIC_METHOD_TYPE,
+- PyDoc_STR("Creates a list-tree representation of this ST.")},
+- {"totuple", (PyCFunction)parser_st2tuple, PUBLIC_METHOD_TYPE,
+- PyDoc_STR("Creates a tuple-tree representation of this ST.")},
+- {"__sizeof__", (PyCFunction)parser_sizeof, METH_NOARGS,
+- PyDoc_STR("Returns size in memory, in bytes.")},
+- {NULL, NULL, 0, NULL}
+-};
+
+ static
+ PyTypeObject PyST_Type = {
+@@ -224,14 +200,7 @@
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+
+ /* __doc__ */
+- "Intermediate representation of a Python parse tree.",
+- 0, /* tp_traverse */
+- 0, /* tp_clear */
+- 0, /* tp_richcompare */
+- 0, /* tp_weaklistoffset */
+- 0, /* tp_iter */
+- 0, /* tp_iternext */
+- parser_methods, /* tp_methods */
++ "Intermediate representation of a Python parse tree."
+ }; /* PyST_Type */
+
+
+@@ -525,6 +494,25 @@
+ }
+
+
++#define PUBLIC_METHOD_TYPE (METH_VARARGS|METH_KEYWORDS)
++
++static PyMethodDef
++parser_methods[] = {
++ {"compile", (PyCFunction)parser_compilest, PUBLIC_METHOD_TYPE,
++ PyDoc_STR("Compile this ST object into a code object.")},
++ {"isexpr", (PyCFunction)parser_isexpr, PUBLIC_METHOD_TYPE,
++ PyDoc_STR("Determines if this ST object was created from an expression.")},
++ {"issuite", (PyCFunction)parser_issuite, PUBLIC_METHOD_TYPE,
++ PyDoc_STR("Determines if this ST object was created from a suite.")},
++ {"tolist", (PyCFunction)parser_st2list, PUBLIC_METHOD_TYPE,
++ PyDoc_STR("Creates a list-tree representation of this ST.")},
++ {"totuple", (PyCFunction)parser_st2tuple, PUBLIC_METHOD_TYPE,
++ PyDoc_STR("Creates a tuple-tree representation of this ST.")},
++
++ {NULL, NULL, 0, NULL}
++};
++
++
+ static PyObject*
+ parser_getattr(PyObject *self, char *name)
+ {
+@@ -707,15 +695,6 @@
+ return parser_tuple2st(self, args, kw);
+ }
+
+-static PyObject *
+-parser_sizeof(PyST_Object *st, void *unused)
+-{
+- Py_ssize_t res;
+-
+- res = sizeof(PyST_Object) + _PyNode_SizeOf(st->st_node);
+- return PyLong_FromSsize_t(res);
+-}
+-
+
+ /* node* build_node_children()
+ *
+diff -urN a/Modules/_struct.c b/Modules/_struct.c
+--- a/Modules/_struct.c 2012-12-25 13:41:08.519405725 +0100
++++ b/Modules/_struct.c 2012-12-25 13:48:48.951405344 +0100
+@@ -1693,18 +1693,6 @@
+ return PyInt_FromSsize_t(self->s_size);
+ }
+
+-PyDoc_STRVAR(s_sizeof__doc__,
+-"S.__sizeof__() -> size of S in memory, in bytes");
+-
+-static PyObject *
+-s_sizeof(PyStructObject *self, void *unused)
+-{
+- Py_ssize_t size;
+-
+- size = sizeof(PyStructObject) + sizeof(formatcode) * (self->s_len + 1);
+- return PyLong_FromSsize_t(size);
+-}
+-
+ /* List of functions */
+
+ static struct PyMethodDef s_methods[] = {
+@@ -1713,7 +1701,6 @@
+ {"unpack", s_unpack, METH_O, s_unpack__doc__},
+ {"unpack_from", (PyCFunction)s_unpack_from, METH_VARARGS|METH_KEYWORDS,
+ s_unpack_from__doc__},
+- {"__sizeof__", (PyCFunction)s_sizeof, METH_NOARGS, s_sizeof__doc__},
+ {NULL, NULL} /* sentinel */
+ };
+
+diff -urN a/Parser/node.c b/Parser/node.c
+--- a/Parser/node.c 2012-12-25 13:41:08.551405725 +0100
++++ b/Parser/node.c 2012-12-25 13:48:19.859405367 +0100
+@@ -114,7 +114,6 @@
+
+ /* Forward */
+ static void freechildren(node *);
+-static Py_ssize_t sizeofchildren(node *n);
+
+
+ void
+@@ -126,16 +125,6 @@
+ }
+ }
+
+-Py_ssize_t
+-_PyNode_SizeOf(node *n)
+-{
+- Py_ssize_t res = 0;
+-
+- if (n != NULL)
+- res = sizeof(node) + sizeofchildren(n);
+- return res;
+-}
+-
+ static void
+ freechildren(node *n)
+ {
+@@ -147,18 +136,3 @@
+ if (STR(n) != NULL)
+ PyObject_FREE(STR(n));
+ }
+-
+-static Py_ssize_t
+-sizeofchildren(node *n)
+-{
+- Py_ssize_t res = 0;
+- int i;
+- for (i = NCH(n); --i >= 0; )
+- res += sizeofchildren(CHILD(n, i));
+- if (n->n_child != NULL)
+- /* allocated size of n->n_child array */
+- res += XXXROUNDUP(NCH(n)) * sizeof(node);
+- if (STR(n) != NULL)
+- res += strlen(STR(n)) + 1;
+- return res;
+-}
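Besides the C-level __sizeof__ methods, the revert above also removes the test_support helpers (calcobjsize, calcvobjsize, check_sizeof) that compute expected object sizes from struct format strings. A minimal sketch of that idea, restated from the removed code and assuming a non-pydebug CPython 2.7 build (a pydebug build prepends two extra pointers to every object header):

    import struct, sys

    # PyObject header: reference count + type pointer.
    _header = '2P'

    def calcobjsize(fmt):
        # the trailing '0P' pads the struct out to pointer alignment
        return struct.calcsize(_header + fmt + '0P')

    # bool stores its value in a C long, so these two should agree:
    print(sys.getsizeof(True))
    print(calcobjsize('l'))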
diff --git a/examples/python2.7/patches/series b/examples/python2.7/patches/series
new file mode 100644
index 0000000..54d6e55
--- /dev/null
+++ b/examples/python2.7/patches/series
@@ -0,0 +1,46 @@
+hg-updates.diff
+revert-sizeof-methods.diff
+revert-issue14635.diff
+makeflags.diff
+issue9374-followup.diff
+setup-modules-heapq.diff
+issue9189.diff
+issue15340.diff
+issue15847.diff
+deb-setup.diff
+deb-locations.diff
+site-locations.diff
+distutils-install-layout.diff
+locale-module.diff
+distutils-link.diff
+distutils-sysconfig.diff
+test-sundry.diff
+tkinter-import.diff
+hotshot-import.diff
+webbrowser.diff
+linecache.diff
+doc-nodownload.diff
+profiled-build.diff
+no-zip-on-sys.path.diff
+platform-lsbrelease.diff
+bdist-wininst-notfound.diff
+setup-modules-ssl.diff
+#makesetup-bashism.diff
+hurd-broken-poll.diff
+hurd-disable-nonworking-constants.diff
+enable-fpectl.diff
+statvfs-f_flag-constants.diff
+issue9012a.diff
+link-system-expat.diff
+plat-gnukfreebsd.diff
+bsddb-libpath.diff
+disable-sem-check.diff
+ncursesw-incdir.diff
+multiprocessing-typos.diff
+ctypes-arm.diff
+db5.1.diff
+disable-ssl-cert-tests.diff
+xdg-gvfs-open.diff
+sqlite-rpath.diff
+do-not-italicize-punctuation.patch
+sysconfigdata.diff
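The series file lists the patches in the order they are applied, quilt-style; the commented-out entry (#makesetup-bashism.diff) is skipped. A small reader illustrating those assumed semantics:

    def read_series(path):
        # Return patch names in application order, ignoring blank lines
        # and '#'-commented entries.
        patches = []
        with open(path) as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith('#'):
                    patches.append(line)
        return patches

    # e.g. read_series('examples/python2.7/patches/series')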
diff --git a/examples/python2.7/patches/setup-modules-heapq.diff b/examples/python2.7/patches/setup-modules-heapq.diff
new file mode 100644
index 0000000..0e34f49
--- /dev/null
+++ b/examples/python2.7/patches/setup-modules-heapq.diff
@@ -0,0 +1,12 @@
+Index: b/Modules/Setup.dist
+===================================================================
+--- a/Modules/Setup.dist
++++ b/Modules/Setup.dist
+@@ -177,6 +177,7 @@
+ #_testcapi _testcapimodule.c # Python C API test module
+ #_random _randommodule.c # Random number generator
+ #_collections _collectionsmodule.c # Container types
++#_heapq _heapqmodule.c # Heapq type
+ #itertools itertoolsmodule.c # Functions creating iterators for efficient looping
+ #strop stropmodule.c # String manipulations
+ #_functools _functoolsmodule.c # Tools for working with functions and callable objects
diff --git a/examples/python2.7/patches/setup-modules-ssl.diff b/examples/python2.7/patches/setup-modules-ssl.diff
new file mode 100644
index 0000000..d4bf356
--- /dev/null
+++ b/examples/python2.7/patches/setup-modules-ssl.diff
@@ -0,0 +1,24 @@
+# DP: Modules/Setup.dist: patch to build _hashlib and _ssl extensions statically
+
+--- a/Modules/Setup.dist
++++ b/Modules/Setup.dist
+@@ -211,10 +211,7 @@
+
+ # Socket module helper for SSL support; you must comment out the other
+ # socket line above, and possibly edit the SSL variable:
+-#SSL=/usr/local/ssl
+-#_ssl _ssl.c \
+-# -DUSE_SSL -I$(SSL)/include -I$(SSL)/include/openssl \
+-# -L$(SSL)/lib -lssl -lcrypto
++#_ssl _ssl.c -lssl -lcrypto
+
+ # The crypt module is now disabled by default because it breaks builds
+ # on many systems (where -lcrypt is needed), e.g. Linux (I believe).
+@@ -257,6 +254,7 @@
+ #_sha256 sha256module.c
+ #_sha512 sha512module.c
+
++#_hashlib _hashopenssl.c -lssl -lcrypto
+
+ # SGI IRIX specific modules -- off by default.
+
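These Setup.dist lines remain commented out; a build that uncomments them (typically via Modules/Setup.local) links _ssl and _hashlib statically into the interpreter instead of building them as shared extensions. One way to verify on such a build, purely illustrative:

    import sys
    # Statically linked extension modules are reported as builtins
    # rather than living in lib-dynload as .so files.
    print('_ssl' in sys.builtin_module_names)
    print('_hashlib' in sys.builtin_module_names)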
diff --git a/examples/python2.7/patches/setup-modules.dpatch b/examples/python2.7/patches/setup-modules.dpatch
new file mode 100644
index 0000000..c12ec0b
--- /dev/null
+++ b/examples/python2.7/patches/setup-modules.dpatch
@@ -0,0 +1,75 @@
+#! /bin/sh -e
+
+# DP: Modules/Setup.dist: patches to build some extensions statically
+
+dir=
+if [ $# -eq 3 -a "$2" = '-d' ]; then
+ pdir="-d $3"
+ dir="$3/"
+elif [ $# -ne 1 ]; then
+ echo >&2 "usage: `basename $0`: -patch|-unpatch [-d <srcdir>]"
+ exit 1
+fi
+case "$1" in
+ -patch)
+ patch $pdir -f --no-backup-if-mismatch -p0 < $0
+ ;;
+ -unpatch)
+ patch $pdir -f --no-backup-if-mismatch -R -p0 < $0
+ ;;
+ *)
+ echo >&2 "usage: `basename $0`: -patch|-unpatch [-d <srcdir>]"
+ exit 1
+esac
+exit 0
+
+--- Modules/_elementtree.c~ 2008-11-27 10:01:33.000000000 +0100
++++ Modules/_elementtree.c 2008-11-27 10:03:30.000000000 +0100
+@@ -1837,7 +1837,10 @@
+ static struct PyExpat_CAPI* expat_capi;
+ #define EXPAT(func) (expat_capi->func)
+ #else
+-#define EXPAT(func) (XML_##func)
++#define EXPAT(func) (PyExpat_XML_##func)
++#define PyExpat_XML_GetErrorLineNumber PyExpat_XML_GetCurrentLineNumber
++#define PyExpat_XML_GetErrorColumnNumber PyExpat_XML_GetCurrentColumnNumber
++#define PyExpat_XML_GetErrorByteIndex PyExpat_XML_GetCurrentByteIndex
+ #endif
+
+ typedef struct {
+--- Modules/Setup.dist~ 2008-11-27 10:59:37.000000000 +0100
++++ Modules/Setup.dist 2008-11-27 11:00:26.000000000 +0100
+@@ -165,7 +165,7 @@
+ #itertools itertoolsmodule.c # Functions creating iterators for efficient looping
+ #atexit atexitmodule.c # Register functions to be run at interpreter-shutdown
+ #_functools _functoolsmodule.c # Tools for working with functions and callable objects
+-#_elementtree -I$(srcdir)/Modules/expat -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI _elementtree.c # elementtree accelerator
++#_elementtree -I$(srcdir)/Modules/expat -DHAVE_EXPAT_CONFIG_H _elementtree.c # elementtree accelerator
+ #_pickle _pickle.c # pickle accelerator
+ #datetime datetimemodule.c # date/time type
+ #_bisect _bisectmodule.c # Bisection algorithms
+@@ -257,6 +257,7 @@
+ #_sha256 sha256module.c
+ #_sha512 sha512module.c
+
++#_hashlib _hashopenssl.c -lssl -lcrypto
+
+ # SGI IRIX specific modules -- off by default.
+
+@@ -341,6 +341,7 @@
+ #DBLIB=$(DB)/lib
+ #_bsddb _bsddb.c -I$(DBINC) -L$(DBLIB) -ldb-$(DBLIBVER)
+
++#_ctypes _ctypes/_ctypes.c _ctypes/callbacks.c _ctypes/callproc.c _ctypes/stgdict.c _ctypes/cfield.c _ctypes/malloc_closure.c -Wl,-Bstatic -lffi -Wl,-Bdynamic
+
+ # Helper module for various ascii-encoders
+ #binascii binascii.c
+@@ -382,7 +382,7 @@
+ #
+ # More information on Expat can be found at www.libexpat.org.
+ #
+-#pyexpat expat/xmlparse.c expat/xmlrole.c expat/xmltok.c pyexpat.c -I$(srcdir)/Modules/expat -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI
++#pyexpat expat/xmlparse.c expat/xmlrole.c expat/xmltok.c pyexpat.c -I$(srcdir)/Modules/expat -DHAVE_EXPAT_CONFIG_H
+
+ # Hye-Shik Chang's CJKCodecs
+
diff --git a/examples/python2.7/patches/site-locations.diff b/examples/python2.7/patches/site-locations.diff
new file mode 100644
index 0000000..7627bd4
--- /dev/null
+++ b/examples/python2.7/patches/site-locations.diff
@@ -0,0 +1,32 @@
+# DP: Set site-packages/dist-packages
+
+--- a/Lib/site.py
++++ b/Lib/site.py
+@@ -19,6 +19,12 @@
+ resulting directories, if they exist, are appended to sys.path, and
+ also inspected for path configuration files.
+
++For Debian and derivatives, this sys.path is augmented with directories
++for packages distributed within the distribution. Local addons go
++into /usr/local/lib/python<version>/dist-packages, Debian addons
++install into /usr/{lib,share}/python<version>/dist-packages.
++/usr/lib/python<version>/site-packages is not used.
++
+ A path configuration file is a file whose name has the form
+ <package>.pth; its contents are additional directories (one per line)
+ to be added to sys.path. Non-existing directories (or
+@@ -300,10 +306,12 @@
+ if sys.platform in ('os2emx', 'riscos'):
+ sitepackages.append(os.path.join(prefix, "Lib", "site-packages"))
+ elif os.sep == '/':
++ sitepackages.append(os.path.join(prefix, "local/lib",
++ "python" + sys.version[:3],
++ "dist-packages"))
+ sitepackages.append(os.path.join(prefix, "lib",
+ "python" + sys.version[:3],
+- "site-packages"))
+- sitepackages.append(os.path.join(prefix, "lib", "site-python"))
++ "dist-packages"))
+ else:
+ sitepackages.append(prefix)
+ sitepackages.append(os.path.join(prefix, "lib", "site-packages"))
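After this patch the global site directories follow the Debian dist-packages layout described in the docstring added above. A quick way to see what actually ended up on sys.path (exact entries depend on the prefix and Python version of the build):

    import sys
    # On a patched build this lists .../dist-packages entries; the stock
    # .../site-packages directory is no longer added.
    print([p for p in sys.path if p.endswith('-packages')])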
diff --git a/examples/python2.7/patches/sqlite-rpath.diff b/examples/python2.7/patches/sqlite-rpath.diff
new file mode 100644
index 0000000..8e34086
--- /dev/null
+++ b/examples/python2.7/patches/sqlite-rpath.diff
@@ -0,0 +1,10 @@
+--- a/setup.py
++++ b/setup.py
+@@ -1106,7 +1106,6 @@
+ include_dirs=["Modules/_sqlite",
+ sqlite_incdir],
+ library_dirs=sqlite_libdir,
+- runtime_library_dirs=sqlite_libdir,
+ extra_link_args=sqlite_extra_link_args,
+ libraries=["sqlite3",]))
+ else:
diff --git a/examples/python2.7/patches/statvfs-f_flag-constants.diff b/examples/python2.7/patches/statvfs-f_flag-constants.diff
new file mode 100644
index 0000000..56419e8
--- /dev/null
+++ b/examples/python2.7/patches/statvfs-f_flag-constants.diff
@@ -0,0 +1,57 @@
+From 21fda4c78000d78cb1824fdf0373031d07f5325a Mon Sep 17 00:00:00 2001
+From: Peter Jones <pjones@redhat.com>
+Date: Wed, 6 Jan 2010 15:22:38 -0500
+Subject: [PATCH] Add flags for statvfs.f_flag to constant list.
+
+You really need these to figure out what statvfs is trying to say to
+you, so add them here.
+---
+ Modules/posixmodule.c | 37 +++++++++++++++++++++++++++++++++++++
+ 1 files changed, 37 insertions(+), 0 deletions(-)
+
+--- a/Modules/posixmodule.c
++++ b/Modules/posixmodule.c
+@@ -9301,6 +9301,43 @@
+ if (ins(d, "EX_NOTFOUND", (long)EX_NOTFOUND)) return -1;
+ #endif /* EX_NOTFOUND */
+
++ /* These came from statvfs.h */
++#ifdef ST_RDONLY
++ if (ins(d, "ST_RDONLY", (long)ST_RDONLY)) return -1;
++#endif /* ST_RDONLY */
++#ifdef ST_NOSUID
++ if (ins(d, "ST_NOSUID", (long)ST_NOSUID)) return -1;
++#endif /* ST_NOSUID */
++
++ /* GNU extensions */
++#ifdef ST_NODEV
++ if (ins(d, "ST_NODEV", (long)ST_NODEV)) return -1;
++#endif /* ST_NODEV */
++#ifdef ST_NOEXEC
++ if (ins(d, "ST_NOEXEC", (long)ST_NOEXEC)) return -1;
++#endif /* ST_NOEXEC */
++#ifdef ST_SYNCHRONOUS
++ if (ins(d, "ST_SYNCHRONOUS", (long)ST_SYNCHRONOUS)) return -1;
++#endif /* ST_SYNCHRONOUS */
++#ifdef ST_MANDLOCK
++ if (ins(d, "ST_MANDLOCK", (long)ST_MANDLOCK)) return -1;
++#endif /* ST_MANDLOCK */
++#ifdef ST_WRITE
++ if (ins(d, "ST_WRITE", (long)ST_WRITE)) return -1;
++#endif /* ST_WRITE */
++#ifdef ST_APPEND
++ if (ins(d, "ST_APPEND", (long)ST_APPEND)) return -1;
++#endif /* ST_APPEND */
++#ifdef ST_NOATIME
++ if (ins(d, "ST_NOATIME", (long)ST_NOATIME)) return -1;
++#endif /* ST_NOATIME */
++#ifdef ST_NODIRATIME
++ if (ins(d, "ST_NODIRATIME", (long)ST_NODIRATIME)) return -1;
++#endif /* ST_NODIRATIME */
++#ifdef ST_RELATIME
++ if (ins(d, "ST_RELATIME", (long)ST_RELATIME)) return -1;
++#endif /* ST_RELATIME */
++
+ #ifdef HAVE_SPAWNV
+ #if defined(PYOS_OS2) && defined(PYCC_GCC)
+ if (ins(d, "P_WAIT", (long)P_WAIT)) return -1;
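With these constants exported, the f_flag member of os.statvfs() results can be decoded without hard-coding flag values. A hedged usage sketch; the getattr() fallbacks cover platforms or builds where a given ST_* constant is not defined:

    import os

    st = os.statvfs('/')
    # f_flag is a bit mask of mount options; the patch exposes the bits
    # as os.ST_* wherever the platform's statvfs.h defines them.
    if st.f_flag & getattr(os, 'ST_RDONLY', 0):
        print('mounted read-only')
    if st.f_flag & getattr(os, 'ST_NOSUID', 0):
        print('setuid/setgid bits are ignored on this filesystem')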
diff --git a/examples/python2.7/patches/subprocess-eintr-safety.dpatch b/examples/python2.7/patches/subprocess-eintr-safety.dpatch
new file mode 100644
index 0000000..6a99712
--- /dev/null
+++ b/examples/python2.7/patches/subprocess-eintr-safety.dpatch
@@ -0,0 +1,81 @@
+#! /bin/sh -e
+
+dir=
+if [ $# -eq 3 -a "$2" = '-d' ]; then
+ pdir="-d $3"
+ dir="$3/"
+elif [ $# -ne 1 ]; then
+ echo >&2 "usage: `basename $0`: -patch|-unpatch [-d <srcdir>]"
+ exit 1
+fi
+case "$1" in
+ -patch)
+ patch $pdir -f --no-backup-if-mismatch -p0 < $0
+ #cd ${dir}gcc && autoconf
+ ;;
+ -unpatch)
+ patch $pdir -f --no-backup-if-mismatch -R -p0 < $0
+ #rm ${dir}gcc/configure
+ ;;
+ *)
+ echo >&2 "usage: `basename $0`: -patch|-unpatch [-d <srcdir>]"
+ exit 1
+esac
+exit 0
+
+--- Lib/test/test_subprocess.py 2007-03-14 19:16:36.000000000 +0100
++++ Lib/test/test_subprocess.py 2007-03-14 19:18:57.000000000 +0100
+@@ -580,6 +578,34 @@ class ProcessTestCase(unittest.TestCase)
+ os.remove(fname)
+ self.assertEqual(rc, 47)
+
++ def test_eintr(self):
++ # retries on EINTR for an argv
++
++ # send ourselves a signal that causes EINTR
++ prev_handler = signal.signal(signal.SIGALRM, lambda x,y: 1)
++ signal.alarm(1)
++ time.sleep(0.5)
++
++ rc = subprocess.Popen(['sleep', '1'])
++ self.assertEqual(rc.wait(), 0)
++
++ signal.signal(signal.SIGALRM, prev_handler)
++
++ def test_eintr_out(self):
++ # retries on EINTR for a shell call and pipelining
++
++ # send ourselves a signal that causes EINTR
++ prev_handler = signal.signal(signal.SIGALRM, lambda x,y: 1)
++ signal.alarm(1)
++ time.sleep(0.5)
++
++ rc = subprocess.Popen("sleep 1; echo hello",
++ shell=True, stdout=subprocess.PIPE)
++ out = rc.communicate()[0]
++ self.assertEqual(rc.returncode, 0)
++ self.assertEqual(out, "hello\n")
++
++ signal.signal(signal.SIGALRM, prev_handler)
+
+ #
+ # Windows tests
+--- Lib/subprocess.py~ 2008-07-15 15:41:24.000000000 +0200
++++ Lib/subprocess.py 2008-07-15 15:42:49.000000000 +0200
+@@ -657,13 +657,13 @@
+ stderr = None
+ if self.stdin:
+ if input:
+- self.stdin.write(input)
++ self._fo_write_no_intr(self.stdin, input)
+ self.stdin.close()
+ elif self.stdout:
+- stdout = self.stdout.read()
++ stdout = self._fo_read_no_intr(self.stdout)
+ self.stdout.close()
+ elif self.stderr:
+- stderr = self.stderr.read()
++ stderr = self._fo_read_no_intr(self.stderr)
+ self.stderr.close()
+ self.wait()
+ return (stdout, stderr)
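The communicate() hunk above replaces direct read()/write() calls with _fo_read_no_intr/_fo_write_no_intr helpers that are defined in a part of the patch not shown here. A minimal sketch of the retry idiom such helpers are assumed to implement:

    import errno

    def retry_on_eintr(func, *args):
        # Repeat the call while it is interrupted by signal delivery
        # (EINTR); any other error propagates unchanged.
        while True:
            try:
                return func(*args)
            except (OSError, IOError) as e:
                if e.errno == errno.EINTR:
                    continue
                raise

    # e.g. retry_on_eintr(proc.stdout.read) instead of proc.stdout.read()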
diff --git a/examples/python2.7/patches/sysconfigdata.diff b/examples/python2.7/patches/sysconfigdata.diff
new file mode 100644
index 0000000..a91bf8e
--- /dev/null
+++ b/examples/python2.7/patches/sysconfigdata.diff
@@ -0,0 +1,91 @@
+Index: b/Lib/sysconfig.py
+===================================================================
+--- a/Lib/sysconfig.py
++++ b/Lib/sysconfig.py
+@@ -330,9 +330,10 @@
+ return os.path.join(_PROJECT_BASE, "Makefile")
+ return os.path.join(get_path('platstdlib').replace("/usr/local","/usr",1), "config" + (sys.pydebug and "_d" or ""), "Makefile")
+
+-
+-def _init_posix(vars):
+- """Initialize the module as appropriate for POSIX systems."""
++def _generate_posix_vars():
++ """Generate the Python module containing build-time variables."""
++ import pprint
++ vars = {}
+ # load the installed Makefile:
+ makefile = _get_makefile_filename()
+ try:
+@@ -360,6 +361,19 @@
+ if _PYTHON_BUILD:
+ vars['LDSHARED'] = vars['BLDSHARED']
+
++ destfile = '_sysconfigdata.py'
++ with open(destfile, 'w') as f:
++ f.write('# system configuration generated and used by'
++ ' the sysconfig module\n')
++ f.write('build_time_vars = ')
++ pprint.pprint(vars, stream=f)
++
++def _init_posix(vars):
++ """Initialize the module as appropriate for POSIX systems."""
++ # _sysconfigdata is generated at build time, see _generate_posix_vars()
++ from _sysconfigdata import build_time_vars
++ vars.update(build_time_vars)
++
+ def _init_non_posix(vars):
+ """Initialize the module as appropriate for NT"""
+ # set basic install directories
+Index: b/Makefile.pre.in
+===================================================================
+--- a/Makefile.pre.in
++++ b/Makefile.pre.in
+@@ -374,7 +374,7 @@
+
+ # Default target
+ all: build_all
+-build_all: $(BUILDPYTHON) oldsharedmods sharedmods gdbhooks
++build_all: $(BUILDPYTHON) $(SYSCONFIGDATA) oldsharedmods sharedmods gdbhooks
+
+ # Compile a binary with gcc profile guided optimization.
+ profile-opt:
+@@ -402,6 +402,7 @@
+ $(MAKE) clean
+ $(MAKE) all PY_CFLAGS="$(PY_CFLAGS) -O0 -pg -fprofile-arcs -ftest-coverage" LIBS="$(LIBS) -lgcov"
+
++SYSCONFIGDATA=_sysconfigdata.py
+
+ # Build the interpreter
+ $(BUILDPYTHON): Modules/python.o $(LIBRARY) $(LDLIBRARY)
+@@ -409,14 +410,20 @@
+ Modules/python.o \
+ -Wl,--whole-archive $(BLDLIBRARY) -Wl,--no-whole-archive $(LIBS) $(MODLIBS) $(SYSLIBS) $(LDLAST)
+
+-platform: $(BUILDPYTHON)
++platform: $(BUILDPYTHON) $(SYSCONFIGDATA)
+ $(RUNSHARED) ./$(BUILDPYTHON) -E -c 'import sys ; from sysconfig import get_platform ; print get_platform()+"-"+sys.version[0:3]' >platform
+
++# Generate the sysconfig build-time data
++$(SYSCONFIGDATA): $(BUILDPYTHON)
++ $(RUNSHARED) ./$(BUILDPYTHON) -SE -c 'import sysconfig; sysconfig._generate_posix_vars()'
++ $(RUNSHARED) ./$(BUILDPYTHON) -S -c 'import os,sys ; from distutils.util import get_platform ; d=os.path.join("build", "lib."+get_platform()+"-"+sys.version[0:3]+("-pydebug" if hasattr(sys, "gettotalrefcount") else "")); sys.stdout.write(d)' > pybuilddir.txt
++ mkdir -p `cat pybuilddir.txt`
++ cp $(SYSCONFIGDATA) `cat pybuilddir.txt`/.
+
+ # Build the shared modules
+ # MAKEFLAGS are sorted and normalized. Under GNU make the 's' for
+ # -s, --silent or --quiet is always the first char.
+-sharedmods: $(BUILDPYTHON)
++sharedmods: $(BUILDPYTHON) $(SYSCONFIGDATA)
+ @case "$$MAKEFLAGS" in \
+ s*) quiet="-q";; \
+ *) quiet="";; \
+@@ -910,7 +917,7 @@
+ else true; \
+ fi; \
+ done
+- @for i in $(srcdir)/Lib/*.py $(srcdir)/Lib/*.doc $(srcdir)/Lib/*.egg-info ; \
++ @for i in $(srcdir)/Lib/*.py $(SYSCONFIGDATA) $(srcdir)/Lib/*.doc $(srcdir)/Lib/*.egg-info ; \
+ do \
+ if test -x $$i; then \
+ $(INSTALL_SCRIPT) $$i $(DESTDIR)$(LIBDEST); \
diff --git a/examples/python2.7/patches/test-sundry.diff b/examples/python2.7/patches/test-sundry.diff
new file mode 100644
index 0000000..167ce48
--- /dev/null
+++ b/examples/python2.7/patches/test-sundry.diff
@@ -0,0 +1,17 @@
+# DP: test_sundry: Don't fail on import of the profile and pstats module
+
+--- a/Lib/test/test_sundry.py
++++ b/Lib/test/test_sundry.py
+@@ -62,7 +62,11 @@
+ import os2emxpath
+ import pdb
+ import posixfile
+- import pstats
++ try:
++ import pstats # separated out into the python-profiler package
++ except ImportError:
++ if test_support.verbose:
++ print "skipping profile and pstats"
+ import py_compile
+ import rexec
+ import sched
diff --git a/examples/python2.7/patches/tkinter-import.diff b/examples/python2.7/patches/tkinter-import.diff
new file mode 100644
index 0000000..910b72c
--- /dev/null
+++ b/examples/python2.7/patches/tkinter-import.diff
@@ -0,0 +1,16 @@
+# DP: suggest installation of python-tk package on failing _tkinter import
+
+--- a/Lib/lib-tk/Tkinter.py
++++ b/Lib/lib-tk/Tkinter.py
+@@ -36,7 +36,10 @@
+ if sys.platform == "win32":
+ # Attempt to configure Tcl/Tk without requiring PATH
+ import FixTk
+-import _tkinter # If this fails your Python may not be configured for Tk
++try:
++ import _tkinter
++except ImportError, msg:
++ raise ImportError, str(msg) + ', please install the python-tk package'
+ tkinter = _tkinter # b/w compat for export
+ TclError = _tkinter.TclError
+ from types import *
diff --git a/examples/python2.7/patches/webbrowser.diff b/examples/python2.7/patches/webbrowser.diff
new file mode 100644
index 0000000..c56e22b
--- /dev/null
+++ b/examples/python2.7/patches/webbrowser.diff
@@ -0,0 +1,27 @@
+# DP: Recognize other browsers: www-browser, x-www-browser, iceweasel, iceape.
+
+--- a/Lib/webbrowser.py
++++ b/Lib/webbrowser.py
+@@ -449,9 +449,13 @@
+ if "KDE_FULL_SESSION" in os.environ and _iscommand("kfmclient"):
+ register("kfmclient", Konqueror, Konqueror("kfmclient"))
+
++ if _iscommand("x-www-browser"):
++ register("x-www-browser", None, BackgroundBrowser("x-www-browser"))
++
+ # The Mozilla/Netscape browsers
+ for browser in ("mozilla-firefox", "firefox",
+ "mozilla-firebird", "firebird",
++ "iceweasel", "iceape",
+ "seamonkey", "mozilla", "netscape"):
+ if _iscommand(browser):
+ register(browser, None, Mozilla(browser))
+@@ -489,6 +493,8 @@
+
+ # Also try console browsers
+ if os.environ.get("TERM"):
++ if _iscommand("www-browser"):
++ register("www-browser", None, GenericBrowser("www-browser"))
+ # The Links/elinks browsers <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
+ if _iscommand("links"):
+ register("links", None, GenericBrowser("links"))
diff --git a/examples/python2.7/patches/xdg-gvfs-open.diff b/examples/python2.7/patches/xdg-gvfs-open.diff
new file mode 100644
index 0000000..7f6ec20
--- /dev/null
+++ b/examples/python2.7/patches/xdg-gvfs-open.diff
@@ -0,0 +1,17 @@
+--- a/Lib/webbrowser.py
++++ b/Lib/webbrowser.py
+@@ -441,6 +441,14 @@
+
+ def register_X_browsers():
+
++ # use xdg-open if around
++ if _iscommand("xdg-open"):
++ register("xdg-open", None, BackgroundBrowser("xdg-open"))
++
++ # The default GNOME3 browser
++ if "GNOME_DESKTOP_SESSION_ID" in os.environ and _iscommand("gvfs-open"):
++ register("gvfs-open", None, BackgroundBrowser("gvfs-open"))
++
+ # The default GNOME browser
+ if "GNOME_DESKTOP_SESSION_ID" in os.environ and _iscommand("gnome-open"):
+ register("gnome-open", None, BackgroundBrowser("gnome-open"))