Update python prebuilts

The main reason for this is b/73397411 (Python spews messages about not
being able to find OpenSSL).

Python's module build system is hideous, so after a couple of attempts I
gave up on convincing it to use a statically linked OpenSSL (which is
how we solved the same issue for the CMake prebuilts). Instead, I chose
to hack the hashlib module to silence the errors, since we don't need
cryptographic hashing for anything we do in lldb.
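
For context, a minimal sketch of what "silencing" amounts to, assuming
the stock CPython 2.7 pattern where hashlib reports each missing
OpenSSL-backed constructor via logging.exception; the function and
parameter names below are illustrative only, not the exact change made
to this prebuilt's lib/python2.7/hashlib.py:

    # Illustrative sketch (assumption): mirrors the constructor-registration
    # loop in CPython 2.7's Lib/hashlib.py, with the error reporting made
    # optional so missing OpenSSL-backed algorithms are ignored quietly.
    import logging

    def _register_hashes(get_hash, names, namespace, quiet=True):
        """Register whichever hash constructors are available.

        get_hash raises ValueError when a constructor (e.g. the
        _hashlib/OpenSSL one) cannot be found; with quiet=True the
        failure is swallowed instead of logged, which is the behavior
        this prebuilt wants.
        """
        for name in names:
            try:
                namespace[name] = get_hash(name)
            except ValueError:
                if not quiet:
                    logging.exception('code for hash %s was not found.', name)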

Bug: 73397411
Change-Id: I28ee305d2f1519fc5e273153e097320cbe0b2f22
diff --git a/bin/2to3 b/bin/2to3
index e0dbedd..80fe2e9 100755
--- a/bin/2to3
+++ b/bin/2to3
@@ -1,4 +1,4 @@
-#!/tmp/python-chaorenl/install/bin/python2.7
+#!/bin/python2.7
 import sys
 from lib2to3.main import main
 
diff --git a/bin/idle b/bin/idle
index 06bc878..8b59bb7 100755
--- a/bin/idle
+++ b/bin/idle
@@ -1,4 +1,4 @@
-#!/tmp/python-chaorenl/install/bin/python2.7
+#!/bin/python2.7
 
 from idlelib.PyShell import main
 if __name__ == '__main__':
diff --git a/bin/pydoc b/bin/pydoc
index d7eef36..2f24f49 100755
--- a/bin/pydoc
+++ b/bin/pydoc
@@ -1,4 +1,4 @@
-#!/tmp/python-chaorenl/install/bin/python2.7
+#!/bin/python2.7
 
 import pydoc
 if __name__ == '__main__':
diff --git a/bin/python2.7 b/bin/python2.7
index e490554..fc992ab 100755
--- a/bin/python2.7
+++ b/bin/python2.7
Binary files differ
diff --git a/bin/python2.7-config b/bin/python2.7-config
index 01d94e0..9cf97b9 100755
--- a/bin/python2.7-config
+++ b/bin/python2.7-config
@@ -1,4 +1,4 @@
-#!/tmp/python-chaorenl/install/bin/python2.7
+#!/bin/python2.7
 
 import sys
 import os
diff --git a/bin/smtpd.py b/bin/smtpd.py
index cfd0a65..7230a5a 100755
--- a/bin/smtpd.py
+++ b/bin/smtpd.py
@@ -1,4 +1,4 @@
-#!/tmp/python-chaorenl/install/bin/python2.7
+#!/bin/python2.7
 """An RFC 2821 smtp proxy.
 
 Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
diff --git a/build-common.sh b/build-common.sh
deleted file mode 100755
index a7721d0..0000000
--- a/build-common.sh
+++ /dev/null
@@ -1,135 +0,0 @@
-# latest version of this file can be found at
-# https://android.googlesource.com/platform/external/lldb-utils
-#
-# inputs
-# $PROJ - project name
-# $VER - project version
-# $1 - name of this file
-#
-# this file does the following:
-#
-# 1) define the following env vars
-#    OS - linux|darwin|windows
-#    USER - username
-#    CORES - numer of cores (for parallel builds)
-#    PATH (with appropriate compilers)
-#    CFLAGS/CXXFLAGS/LDFLAGS
-#    RD - root directory for source and object files
-#    INSTALL - install directory/git repo root
-#    SCRIPT_FILE - absolute path to the parent build script
-#    SCRIPT_DIR - absolute path to the parent build script's directory
-#    COMMON_FILE - absolute path to this file
-# 2) create an empty tmp directory at /tmp/$PROJ-$USER
-# 3) checkout the destination git repo to /tmp/prebuilts/$PROJ/$OS-x86/$VER
-# 4) cd $RD
-
-UNAME="$(uname)"
-SCRATCH=/tmp
-UPSTREAM=https://android.googlesource.com/platform/prebuilts
-case "$UNAME" in
-Linux)
-    OS='linux'
-    INSTALL_VER=$VER
-    ;;
-Darwin)
-    OS='darwin'
-    OSX_MIN=10.8
-    export CC=clang
-    export CXX=$CC++
-    export CFLAGS="$CFLAGS -mmacosx-version-min=$OSX_MIN"
-    export CXXFLAGS="$CXXFLAGS -mmacosx-version-min=$OSX_MIN -stdlib=libc++"
-    export LDFLAGS="$LDFLAGS -mmacosx-version-min=$OSX_MIN"
-    INSTALL_VER=$VER
-    ;;
-*_NT-*)
-    USER=$USERNAME
-    OS='windows'
-    CORES=$NUMBER_OF_PROCESSORS
-    # VS2013 x64 Native Tools Command Prompt
-    case "$MSVS" in
-    2013)
-        devenv() {
-            cmd /c "${VS120COMNTOOLS}VsDevCmd.bat" '&' devenv.com "$@"
-        }
-        INSTALL_VER=${VER}_${MSVS}
-        ;;
-    *)
-        # g++/make build
-        export CC=x86_64-w64-mingw32-gcc
-        export CXX=x86_64-w64-mingw32-g++
-        export LD=x86_64-w64-mingw32-ld
-        ;;
-    esac
-    ;;
-*)
-    exit 1
-    ;;
-esac
-
-RD=$SCRATCH/$PROJ-$USER
-INSTALL="$RD/install"
-
-# OSX lacks a "realpath" bash command
-realpath() {
-    [[ "$1" == /* ]] && echo "$1" || echo "$PWD/${1#./}"
-}
-
-SCRIPT_FILE=$(realpath "$0")
-SCRIPT_DIR="$(dirname "$SCRIPT_FILE")"
-COMMON_FILE="$SCRIPT_DIR/$1"
-
-cd /tmp # windows can't delete if you're in the dir
-rm -rf $RD
-mkdir -p $INSTALL
-mkdir -p $RD
-cd $RD
-
-# clone prebuilt gcc
-case "$OS" in
-linux)
-    # can't get prebuilt clang working so we're using host clang-3.5 https://b/22748915
-    #CLANG_DIR=$RD/clang
-    #git clone $UPSTREAM/clang/linux-x86/host/3.6 $CLANG_DIR
-    #export CC="$CLANG_DIR/bin/clang"
-    #export CXX="$CC++"
-    export CC=clang-3.5
-    export CXX=clang++-3.5
-
-    GCC_DIR=$RD/gcc
-    git clone $UPSTREAM/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 $GCC_DIR
-
-    find "$GCC_DIR" -name x86_64-linux -exec ln -fns {} {}-gnu \;
-
-    FLAGS+=(-fuse-ld=gold)
-    FLAGS+=(--gcc-toolchain="$GCC_DIR")
-    FLAGS+=(--sysroot "$GCC_DIR/sysroot")
-    FLAGS+=(-B"$GCC_DIR/bin/x86_64-linux-")
-    export CFLAGS="$CFLAGS ${FLAGS[*]}"
-    export CXXFLAGS="$CXXFLAGS ${FLAGS[*]}"
-    export LDFLAGS="$LDFLAGS -m64"
-    ;;
-esac
-
-commit_and_push()
-{
-    BRANCH=studio-1.4-dev
-    # check into a local git clone
-    rm -rf $SCRATCH/prebuilts/$PROJ/
-    mkdir -p $SCRATCH/prebuilts/$PROJ/
-    cd $SCRATCH/prebuilts/$PROJ/
-    git clone $UPSTREAM/$PROJ/$OS-x86 -b $BRANCH
-    GIT_REPO="$SCRATCH/prebuilts/$PROJ/$OS-x86"
-    cd $GIT_REPO
-    rm -rf *
-    mv $INSTALL/* $GIT_REPO
-    cp $SCRIPT_FILE $GIT_REPO
-    cp $COMMON_FILE $GIT_REPO
-
-    git add .
-    git commit -m "Adding binaries for $INSTALL_VER"
-
-    # execute this command to upload
-    #git push origin HEAD:refs/for/$BRANCH
-
-    rm -rf $RD
-}
diff --git a/build-python.sh b/build-python.sh
deleted file mode 100755
index a4a8224..0000000
--- a/build-python.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash -ex
-# latest version of this file can be found at
-# https://android.googlesource.com/platform/external/lldb-utils
-#
-# Download & build python on the local machine
-# works on Linux, OSX, and Windows (Cygwin)
-# leaves output in /tmp/prebuilts/python/$OS-x86
-
-PROJ=python
-VER=2.7.10
-MSVS=2013
-
-source $(dirname "$0")/build-common.sh build-common.sh
-
-BASE=Python-$VER
-TGZ=$BASE.tgz
-curl -L https://www.python.org/ftp/python/$VER/$TGZ -o $TGZ
-tar xzf $TGZ || cat $TGZ # if this fails, we're probably getting an http error
-cd $BASE
-
-case "$OS" in
-windows)
-	cp PC/pyconfig.h Include/
-	devenv PCbuild/pcbuild.sln /Upgrade
-	# some projects will fail and that's okay
-	devenv PCbuild/pcbuild.sln /Build Debug || true
-	devenv PCbuild/pcbuild.sln /Build Release || true
-	devenv PCbuild/pcbuild.sln /Build "Debug^|x64" || true
-	devenv PCbuild/pcbuild.sln /Build "Release^|x64" || true
-	curl -L http://llvm.org/svn/llvm-project/lldb/trunk/scripts/install_custom_python.py -o install_custom_python.py
-	python install_custom_python.py --source "$(cygpath -w "$RD/$BASE")" --dest "$(cygpath -w "$INSTALL")" --overwrite --silent
-	;;
-linux|darwin)
-	unset CFLAGS CXXFLAGS
-	mkdir $RD/build
-	cd $RD/build
-	$RD/$BASE/configure --prefix=$INSTALL --enable-unicode=ucs4 --enable-shared
-	make -j$CORES
-	make install
-	;;
-esac
-
-find $INSTALL '(' -name '*.pyc' -or -name '*.pyo' ')' -delete
-
-commit_and_push
diff --git a/include/python2.7/ceval.h b/include/python2.7/ceval.h
index 0e8bd2a..3735f00 100644
--- a/include/python2.7/ceval.h
+++ b/include/python2.7/ceval.h
@@ -50,7 +50,7 @@
              _Py_CheckRecursiveCall(where))
 #define Py_LeaveRecursiveCall()                         \
             (--PyThreadState_GET()->recursion_depth)
-PyAPI_FUNC(int) _Py_CheckRecursiveCall(char *where);
+PyAPI_FUNC(int) _Py_CheckRecursiveCall(const char *where);
 PyAPI_DATA(int) _Py_CheckRecursionLimit;
 #ifdef USE_STACKCHECK
 #  define _Py_MakeRecCheck(x)  (++(x) > --_Py_CheckRecursionLimit)
diff --git a/include/python2.7/code.h b/include/python2.7/code.h
index 38b2958..7456fd6 100644
--- a/include/python2.7/code.h
+++ b/include/python2.7/code.h
@@ -70,7 +70,7 @@
 /* Public interface */
 PyAPI_FUNC(PyCodeObject *) PyCode_New(
 	int, int, int, int, PyObject *, PyObject *, PyObject *, PyObject *,
-	PyObject *, PyObject *, PyObject *, PyObject *, int, PyObject *); 
+	PyObject *, PyObject *, PyObject *, PyObject *, int, PyObject *);
         /* same as struct above */
 
 /* Creates a new empty code object with the specified source location. */
@@ -98,6 +98,15 @@
 PyAPI_FUNC(int) _PyCode_CheckLineNumber(PyCodeObject* co,
                                         int lasti, PyAddrPair *bounds);
 
+/* Create a comparable key used to compare constants taking in account the
+ * object type. It is used to make sure types are not coerced (e.g., float and
+ * complex) _and_ to distinguish 0.0 from -0.0 e.g. on IEEE platforms
+ *
+ * Return (type(obj), obj, ...): a tuple with variable size (at least 2 items)
+ * depending on the type and the value. The type is the first item to not
+ * compare bytes and str which can raise a BytesWarning exception. */
+PyAPI_FUNC(PyObject*) _PyCode_ConstantKey(PyObject *obj);
+
 PyAPI_FUNC(PyObject*) PyCode_Optimize(PyObject *code, PyObject* consts,
                                       PyObject *names, PyObject *lineno_obj);
 
diff --git a/include/python2.7/codecs.h b/include/python2.7/codecs.h
index c038c6a..8a9041b 100644
--- a/include/python2.7/codecs.h
+++ b/include/python2.7/codecs.h
@@ -81,6 +81,51 @@
        const char *errors
        );
 
+/* Text codec specific encoding and decoding API.
+
+   Checks the encoding against a list of codecs which do not
+   implement a unicode<->bytes encoding before attempting the
+   operation.
+
+   Please note that these APIs are internal and should not
+   be used in Python C extensions.
+
+   XXX (ncoghlan): should we make these, or something like them, public
+   in Python 3.5+?
+
+ */
+PyAPI_FUNC(PyObject *) _PyCodec_LookupTextEncoding(
+       const char *encoding,
+       const char *alternate_command
+       );
+
+PyAPI_FUNC(PyObject *) _PyCodec_EncodeText(
+       PyObject *object,
+       const char *encoding,
+       const char *errors
+       );
+
+PyAPI_FUNC(PyObject *) _PyCodec_DecodeText(
+       PyObject *object,
+       const char *encoding,
+       const char *errors
+       );
+
+/* These two aren't actually text encoding specific, but _io.TextIOWrapper
+ * is the only current API consumer.
+ */
+PyAPI_FUNC(PyObject *) _PyCodecInfo_GetIncrementalDecoder(
+       PyObject *codec_info,
+       const char *errors
+       );
+
+PyAPI_FUNC(PyObject *) _PyCodecInfo_GetIncrementalEncoder(
+       PyObject *codec_info,
+       const char *errors
+       );
+
+
+
 /* --- Codec Lookup APIs -------------------------------------------------- 
 
    All APIs return a codec object with incremented refcount and are
diff --git a/include/python2.7/dictobject.h b/include/python2.7/dictobject.h
index ece01c6..ef524a4 100644
--- a/include/python2.7/dictobject.h
+++ b/include/python2.7/dictobject.h
@@ -108,6 +108,7 @@
 
 PyAPI_FUNC(PyObject *) PyDict_New(void);
 PyAPI_FUNC(PyObject *) PyDict_GetItem(PyObject *mp, PyObject *key);
+PyAPI_FUNC(PyObject *) _PyDict_GetItemWithError(PyObject *mp, PyObject *key);
 PyAPI_FUNC(int) PyDict_SetItem(PyObject *mp, PyObject *key, PyObject *item);
 PyAPI_FUNC(int) PyDict_DelItem(PyObject *mp, PyObject *key);
 PyAPI_FUNC(void) PyDict_Clear(PyObject *mp);
diff --git a/include/python2.7/fileobject.h b/include/python2.7/fileobject.h
index 1b540f9..434605d 100644
--- a/include/python2.7/fileobject.h
+++ b/include/python2.7/fileobject.h
@@ -70,16 +70,13 @@
 */
 int _PyFile_SanitizeMode(char *mode);
 
-#if defined _MSC_VER && _MSC_VER >= 1400
+#if defined _MSC_VER && _MSC_VER < 1900
 /* A routine to check if a file descriptor is valid on Windows.  Returns 0
  * and sets errno to EBADF if it isn't.  This is to avoid Assertions
  * from various functions in the Windows CRT beginning with
  * Visual Studio 2005
  */
 int _PyVerify_fd(int fd);
-#elif defined _MSC_VER && _MSC_VER >= 1200
-/* fdopen doesn't set errno EBADF and crashes for large fd on debug build */
-#define _PyVerify_fd(fd) (_get_osfhandle(fd) >= 0)
 #else
 #define _PyVerify_fd(A) (1) /* dummy */
 #endif
diff --git a/include/python2.7/object.h b/include/python2.7/object.h
index 4ee1f8e..8fe202d 100644
--- a/include/python2.7/object.h
+++ b/include/python2.7/object.h
@@ -824,6 +824,29 @@
 #define Py_XINCREF(op) do { if ((op) == NULL) ; else Py_INCREF(op); } while (0)
 #define Py_XDECREF(op) do { if ((op) == NULL) ; else Py_DECREF(op); } while (0)
 
+/* Safely decref `op` and set `op` to `op2`.
+ *
+ * As in case of Py_CLEAR "the obvious" code can be deadly:
+ *
+ *     Py_XDECREF(op);
+ *     op = op2;
+ *
+ * The safe way is:
+ *
+ *      Py_SETREF(op, op2);
+ *
+ * That arranges to set `op` to `op2` _before_ decref'ing, so that any code
+ * triggered as a side-effect of `op` getting torn down no longer believes
+ * `op` points to a valid object.
+ */
+
+#define Py_SETREF(op, op2)                      \
+    do {                                        \
+        PyObject *_py_tmp = (PyObject *)(op);   \
+        (op) = (op2);                           \
+        Py_XDECREF(_py_tmp);                    \
+    } while (0)
+
 /*
 These are provided as conveniences to Python runtime embedders, so that
 they can have object code that is not dependent on Python compilation flags.
diff --git a/include/python2.7/opcode.h b/include/python2.7/opcode.h
index 9764109..9ed5487 100644
--- a/include/python2.7/opcode.h
+++ b/include/python2.7/opcode.h
@@ -37,12 +37,21 @@
 
 #define SLICE		30
 /* Also uses 31-33 */
+#define SLICE_1		31
+#define SLICE_2		32
+#define SLICE_3		33
 
 #define STORE_SLICE	40
 /* Also uses 41-43 */
+#define STORE_SLICE_1	41
+#define STORE_SLICE_2	42
+#define STORE_SLICE_3	43
 
 #define DELETE_SLICE	50
 /* Also uses 51-53 */
+#define DELETE_SLICE_1	51
+#define DELETE_SLICE_2	52
+#define DELETE_SLICE_3	53
 
 #define STORE_MAP	54
 #define INPLACE_ADD	55
diff --git a/include/python2.7/patchlevel.h b/include/python2.7/patchlevel.h
index a71efde..4d6e334 100644
--- a/include/python2.7/patchlevel.h
+++ b/include/python2.7/patchlevel.h
@@ -22,12 +22,12 @@
 /*--start constants--*/
 #define PY_MAJOR_VERSION	2
 #define PY_MINOR_VERSION	7
-#define PY_MICRO_VERSION	10
+#define PY_MICRO_VERSION	11
 #define PY_RELEASE_LEVEL	PY_RELEASE_LEVEL_FINAL
 #define PY_RELEASE_SERIAL	0
 
 /* Version as a string */
-#define PY_VERSION      	"2.7.10"
+#define PY_VERSION      	"2.7.11+"
 /*--end constants--*/
 
 /* Subversion Revision number of this file (not of the repository). Empty
diff --git a/include/python2.7/pyconfig.h b/include/python2.7/pyconfig.h
index 957ad23..bcfab04 100644
--- a/include/python2.7/pyconfig.h
+++ b/include/python2.7/pyconfig.h
@@ -119,6 +119,9 @@
 /* Define to 1 if you have the `clock' function. */
 #define HAVE_CLOCK 1
 
+/* Define if the C compiler supports computed gotos. */
+#define HAVE_COMPUTED_GOTOS 1
+
 /* Define to 1 if you have the `confstr' function. */
 #define HAVE_CONFSTR 1
 
@@ -549,7 +552,7 @@
 #define HAVE_PUTENV 1
 
 /* Define if the libcrypto has RAND_egd */
-#define HAVE_RAND_EGD 1
+/* #undef HAVE_RAND_EGD */
 
 /* Define to 1 if you have the `readlink' function. */
 #define HAVE_READLINK 1
@@ -729,10 +732,6 @@
 /* Define to 1 if `tm_zone' is a member of `struct tm'. */
 #define HAVE_STRUCT_TM_TM_ZONE 1
 
-/* Define to 1 if your `struct stat' has `st_blocks'. Deprecated, use
-   `HAVE_STRUCT_STAT_ST_BLOCKS' instead. */
-#define HAVE_ST_BLOCKS 1
-
 /* Define if you have the 'symlink' function. */
 #define HAVE_SYMLINK 1
 
@@ -1070,6 +1069,9 @@
 /* Define to 1 if your <sys/time.h> declares `struct tm'. */
 /* #undef TM_IN_SYS_TIME */
 
+/* Define if you want to use computed gotos in ceval.c. */
+/* #undef USE_COMPUTED_GOTOS */
+
 /* Enable extensions on AIX 3, Interix.  */
 #ifndef _ALL_SOURCE
 # define _ALL_SOURCE 1
diff --git a/include/python2.7/pyerrors.h b/include/python2.7/pyerrors.h
index 2ef205e..51134ef 100644
--- a/include/python2.7/pyerrors.h
+++ b/include/python2.7/pyerrors.h
@@ -215,7 +215,7 @@
 
 /* Export the old function so that the existing API remains available: */
 PyAPI_FUNC(void) PyErr_BadInternalCall(void);
-PyAPI_FUNC(void) _PyErr_BadInternalCall(char *filename, int lineno);
+PyAPI_FUNC(void) _PyErr_BadInternalCall(const char *filename, int lineno);
 /* Mask the old API with a call to the new API for code compiled under
    Python 2.0: */
 #define PyErr_BadInternalCall() _PyErr_BadInternalCall(__FILE__, __LINE__)
diff --git a/include/python2.7/pymath.h b/include/python2.7/pymath.h
index e3cf22b..d35c87c 100644
--- a/include/python2.7/pymath.h
+++ b/include/python2.7/pymath.h
@@ -152,7 +152,29 @@
  * doesn't support NaNs.
  */
 #if !defined(Py_NAN) && !defined(Py_NO_NAN)
-#define Py_NAN (Py_HUGE_VAL * 0.)
+#if !defined(__INTEL_COMPILER)
+    #define Py_NAN (Py_HUGE_VAL * 0.)
+#else /* __INTEL_COMPILER */
+    #if defined(ICC_NAN_STRICT)
+        #pragma float_control(push)
+        #pragma float_control(precise, on)
+        #pragma float_control(except,  on)
+        #if defined(_MSC_VER)
+            __declspec(noinline)
+        #else /* Linux */
+            __attribute__((noinline))
+        #endif /* _MSC_VER */
+        static double __icc_nan()
+        {
+            return sqrt(-1.0);
+        }
+        #pragma float_control (pop)
+        #define Py_NAN __icc_nan()
+    #else /* ICC_NAN_RELAXED as default for Intel Compiler */
+        static union { unsigned char buf[8]; double __icc_nan; } __nan_store = {0,0,0,0,0,0,0xf8,0x7f};
+        #define Py_NAN (__nan_store.__icc_nan)
+    #endif /* ICC_NAN_STRICT */
+#endif /* __INTEL_COMPILER */
 #endif
 
 /* Py_OVERFLOWED(X)
diff --git a/include/python2.7/unicodeobject.h b/include/python2.7/unicodeobject.h
index 9ab724a..1269bf6 100644
--- a/include/python2.7/unicodeobject.h
+++ b/include/python2.7/unicodeobject.h
@@ -1307,7 +1307,7 @@
     int op                      /* Operation: Py_EQ, Py_NE, Py_GT, etc. */
     );
 
-/* Apply a argument tuple or dictionary to a format string and return
+/* Apply an argument tuple or dictionary to a format string and return
    the resulting Unicode string. */
 
 PyAPI_FUNC(PyObject *) PyUnicode_Format(
diff --git a/lib/libpython2.7.so.1.0 b/lib/libpython2.7.so.1.0
index b7d92da..bc30d8a 100755
--- a/lib/libpython2.7.so.1.0
+++ b/lib/libpython2.7.so.1.0
Binary files differ
diff --git a/lib/pkgconfig/python-2.7.pc b/lib/pkgconfig/python-2.7.pc
index 8e52153..455c2b9 100644
--- a/lib/pkgconfig/python-2.7.pc
+++ b/lib/pkgconfig/python-2.7.pc
@@ -1,4 +1,4 @@
-prefix=/tmp/python-chaorenl/install
+prefix=
 exec_prefix=${prefix}
 libdir=${exec_prefix}/lib
 includedir=${prefix}/include
diff --git a/lib/python2.7/CGIHTTPServer.py b/lib/python2.7/CGIHTTPServer.py
index 8f8ae56..5620083 100644
--- a/lib/python2.7/CGIHTTPServer.py
+++ b/lib/python2.7/CGIHTTPServer.py
@@ -84,7 +84,7 @@
         path begins with one of the strings in self.cgi_directories
         (and the next character is a '/' or the end of the string).
         """
-        collapsed_path = _url_collapse_path(urllib.unquote(self.path))
+        collapsed_path = _url_collapse_path(self.path)
         dir_sep = collapsed_path.find('/', 1)
         head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
         if head in self.cgi_directories:
@@ -120,11 +120,7 @@
                 break
 
         # find an explicit query string, if present.
-        i = rest.rfind('?')
-        if i >= 0:
-            rest, query = rest[:i], rest[i+1:]
-        else:
-            query = ''
+        rest, _, query = rest.partition('?')
 
         # dissect the part after the directory name into a script name &
         # a possible additional path, to be stored in PATH_INFO.
@@ -308,13 +304,15 @@
     The utility of this function is limited to is_cgi method and helps
     preventing some security attacks.
 
-    Returns: A tuple of (head, tail) where tail is everything after the final /
-    and head is everything before it.  Head will always start with a '/' and,
-    if it contains anything else, never have a trailing '/'.
+    Returns: The reconstituted URL, which will always start with a '/'.
 
     Raises: IndexError if too many '..' occur within the path.
 
     """
+    # Query component should not be involved.
+    path, _, query = path.partition('?')
+    path = urllib.unquote(path)
+
     # Similar to os.path.split(os.path.normpath(path)) but specific to URL
     # path semantics rather than local operating system semantics.
     path_parts = path.split('/')
@@ -335,6 +333,9 @@
     else:
         tail_part = ''
 
+    if query:
+        tail_part = '?'.join((tail_part, query))
+
     splitpath = ('/' + '/'.join(head_parts), tail_part)
     collapsed_path = "/".join(splitpath)
 
diff --git a/lib/python2.7/LICENSE.txt b/lib/python2.7/LICENSE.txt
index 88251f5..84a3337 100644
--- a/lib/python2.7/LICENSE.txt
+++ b/lib/python2.7/LICENSE.txt
@@ -74,8 +74,9 @@
 distribute, and otherwise use Python alone or in any derivative version,
 provided, however, that PSF's License Agreement and PSF's notice of copyright,
 i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved"
-are retained in Python alone or in any derivative version prepared by Licensee.
+2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+Reserved" are retained in Python alone or in any derivative version prepared by
+Licensee.
 
 3. In the event Licensee prepares a derivative work that is based on
 or incorporates Python or any part thereof, and wants to make
diff --git a/lib/python2.7/SocketServer.py b/lib/python2.7/SocketServer.py
index 23ce6fc..d324a21 100644
--- a/lib/python2.7/SocketServer.py
+++ b/lib/python2.7/SocketServer.py
@@ -121,11 +121,6 @@
 
 # Author of the BaseServer patch: Luke Kenneth Casson Leighton
 
-# XXX Warning!
-# There is a test suite for this module, but it cannot be run by the
-# standard regression test.
-# To run it manually, run Lib/test/test_socketserver.py.
-
 __version__ = "0.4"
 
 
@@ -296,6 +291,8 @@
             except:
                 self.handle_error(request, client_address)
                 self.shutdown_request(request)
+        else:
+            self.shutdown_request(request)
 
     def handle_timeout(self):
         """Called if no new request arrives within self.timeout.
@@ -642,7 +639,7 @@
     client address as self.client_address, and the server (in case it
     needs access to per-server information) as self.server.  Since a
     separate instance is created for each request, the handle() method
-    can define arbitrary other instance variariables.
+    can define other arbitrary instance variables.
 
     """
 
@@ -719,9 +716,6 @@
 
 class DatagramRequestHandler(BaseRequestHandler):
 
-    # XXX Regrettably, I cannot get this working on Linux;
-    # s.recvfrom() doesn't return a meaningful client address.
-
     """Define self.rfile and self.wfile for datagram sockets."""
 
     def setup(self):
diff --git a/lib/python2.7/UserDict.py b/lib/python2.7/UserDict.py
index bb2218a..732b327 100644
--- a/lib/python2.7/UserDict.py
+++ b/lib/python2.7/UserDict.py
@@ -1,7 +1,24 @@
 """A more or less complete user-defined wrapper around dictionary objects."""
 
 class UserDict:
-    def __init__(self, dict=None, **kwargs):
+    def __init__(*args, **kwargs):
+        if not args:
+            raise TypeError("descriptor '__init__' of 'UserDict' object "
+                            "needs an argument")
+        self = args[0]
+        args = args[1:]
+        if len(args) > 1:
+            raise TypeError('expected at most 1 arguments, got %d' % len(args))
+        if args:
+            dict = args[0]
+        elif 'dict' in kwargs:
+            dict = kwargs.pop('dict')
+            import warnings
+            warnings.warn("Passing 'dict' as keyword argument is "
+                          "deprecated", PendingDeprecationWarning,
+                          stacklevel=2)
+        else:
+            dict = None
         self.data = {}
         if dict is not None:
             self.update(dict)
@@ -43,7 +60,23 @@
     def itervalues(self): return self.data.itervalues()
     def values(self): return self.data.values()
     def has_key(self, key): return key in self.data
-    def update(self, dict=None, **kwargs):
+    def update(*args, **kwargs):
+        if not args:
+            raise TypeError("descriptor 'update' of 'UserDict' object "
+                            "needs an argument")
+        self = args[0]
+        args = args[1:]
+        if len(args) > 1:
+            raise TypeError('expected at most 1 arguments, got %d' % len(args))
+        if args:
+            dict = args[0]
+        elif 'dict' in kwargs:
+            dict = kwargs.pop('dict')
+            import warnings
+            warnings.warn("Passing 'dict' as keyword argument is deprecated",
+                          PendingDeprecationWarning, stacklevel=2)
+        else:
+            dict = None
         if dict is None:
             pass
         elif isinstance(dict, UserDict):
diff --git a/lib/python2.7/_abcoll.py b/lib/python2.7/_abcoll.py
index 0385627..b643692 100644
--- a/lib/python2.7/_abcoll.py
+++ b/lib/python2.7/_abcoll.py
@@ -453,6 +453,7 @@
         for key in self._mapping:
             yield key
 
+KeysView.register(type({}.viewkeys()))
 
 class ItemsView(MappingView, Set):
 
@@ -473,6 +474,7 @@
         for key in self._mapping:
             yield (key, self._mapping[key])
 
+ItemsView.register(type({}.viewitems()))
 
 class ValuesView(MappingView):
 
@@ -486,6 +488,7 @@
         for key in self._mapping:
             yield self._mapping[key]
 
+ValuesView.register(type({}.viewvalues()))
 
 class MutableMapping(Mapping):
 
diff --git a/lib/python2.7/_pyio.py b/lib/python2.7/_pyio.py
index a7f4301..694b778 100644
--- a/lib/python2.7/_pyio.py
+++ b/lib/python2.7/_pyio.py
@@ -7,6 +7,7 @@
 import os
 import abc
 import codecs
+import sys
 import warnings
 import errno
 # Import thread instead of threading to reduce startup cost
@@ -1497,6 +1498,11 @@
         if not isinstance(encoding, basestring):
             raise ValueError("invalid encoding: %r" % encoding)
 
+        if sys.py3kwarning and not codecs.lookup(encoding)._is_text_encoding:
+            msg = ("%r is not a text encoding; "
+                   "use codecs.open() to handle arbitrary codecs")
+            warnings.warnpy3k(msg % encoding, stacklevel=2)
+
         if errors is None:
             errors = "strict"
         else:
diff --git a/lib/python2.7/_strptime.py b/lib/python2.7/_strptime.py
index 1bd570d..feac05a 100644
--- a/lib/python2.7/_strptime.py
+++ b/lib/python2.7/_strptime.py
@@ -75,6 +75,8 @@
         self.__calc_date_time()
         if _getlang() != self.lang:
             raise ValueError("locale changed during initialization")
+        if time.tzname != self.tzname or time.daylight != self.daylight:
+            raise ValueError("timezone changed during initialization")
 
     def __pad(self, seq, front):
         # Add '' to seq to either the front (is True), else the back.
@@ -159,15 +161,17 @@
 
     def __calc_timezone(self):
         # Set self.timezone by using time.tzname.
-        # Do not worry about possibility of time.tzname[0] == timetzname[1]
-        # and time.daylight; handle that in strptime .
+        # Do not worry about possibility of time.tzname[0] == time.tzname[1]
+        # and time.daylight; handle that in strptime.
         try:
             time.tzset()
         except AttributeError:
             pass
-        no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
-        if time.daylight:
-            has_saving = frozenset([time.tzname[1].lower()])
+        self.tzname = time.tzname
+        self.daylight = time.daylight
+        no_saving = frozenset(["utc", "gmt", self.tzname[0].lower()])
+        if self.daylight:
+            has_saving = frozenset([self.tzname[1].lower()])
         else:
             has_saving = frozenset()
         self.timezone = (no_saving, has_saving)
@@ -296,12 +300,15 @@
     """Return a time struct based on the input string and the format string."""
     global _TimeRE_cache, _regex_cache
     with _cache_lock:
-        if _getlang() != _TimeRE_cache.locale_time.lang:
+        locale_time = _TimeRE_cache.locale_time
+        if (_getlang() != locale_time.lang or
+            time.tzname != locale_time.tzname or
+            time.daylight != locale_time.daylight):
             _TimeRE_cache = TimeRE()
             _regex_cache.clear()
+            locale_time = _TimeRE_cache.locale_time
         if len(_regex_cache) > _CACHE_MAX_SIZE:
             _regex_cache.clear()
-        locale_time = _TimeRE_cache.locale_time
         format_regex = _regex_cache.get(format)
         if not format_regex:
             try:
@@ -438,6 +445,10 @@
         week_starts_Mon = True if week_of_year_start == 0 else False
         julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
                                             week_starts_Mon)
+        if julian <= 0:
+            year -= 1
+            yday = 366 if calendar.isleap(year) else 365
+            julian += yday
     # Cannot pre-calculate datetime_date() since can change in Julian
     # calculation and thus could have different value for the day of the week
     # calculation.
diff --git a/lib/python2.7/_sysconfigdata.py b/lib/python2.7/_sysconfigdata.py
index 41343a3..e1b1949 100644
--- a/lib/python2.7/_sysconfigdata.py
+++ b/lib/python2.7/_sysconfigdata.py
@@ -3,39 +3,42 @@
  'AIX_GENUINE_CPLUSPLUS': 0,
  'AR': 'ar',
  'ARFLAGS': 'rc',
- 'ASDLGEN': '/tmp/python-chaorenl/Python-2.7.10/Parser/asdl_c.py',
- 'ASDLGEN_FILES': '/tmp/python-chaorenl/Python-2.7.10/Parser/asdl.py /tmp/python-chaorenl/Python-2.7.10/Parser/asdl_c.py',
- 'AST_ASDL': '/tmp/python-chaorenl/Python-2.7.10/Parser/Python.asdl',
+ 'ASDLGEN': '/buildbot/src/googleplex-android/lldb-master-dev/out/python/source/Parser/asdl_c.py',
+ 'ASDLGEN_FILES': '/buildbot/src/googleplex-android/lldb-master-dev/out/python/source/Parser/asdl.py /buildbot/src/googleplex-android/lldb-master-dev/out/python/source/Parser/asdl_c.py',
+ 'AST_ASDL': '/buildbot/src/googleplex-android/lldb-master-dev/out/python/source/Parser/Python.asdl',
  'AST_C': 'Python/Python-ast.c',
  'AST_C_DIR': 'Python',
  'AST_H': 'Include/Python-ast.h',
  'AST_H_DIR': 'Include',
  'ATHEOS_THREADS': 0,
- 'BASECFLAGS': '-fno-strict-aliasing -OPT:Olimit=0',
+ 'BASECFLAGS': '-fno-strict-aliasing',
  'BASEMODLIBS': '',
  'BEOS_THREADS': 0,
- 'BINDIR': '/tmp/python-chaorenl/install/bin',
- 'BINLIBDEST': '/tmp/python-chaorenl/install/lib/python2.7',
+ 'BINDIR': '/bin',
+ 'BINLIBDEST': '/lib/python2.7',
  'BLDLIBRARY': '-L. -lpython2.7',
- 'BLDSHARED': 'clang-3.5 -pthread -shared -m64',
+ 'BLDSHARED': '/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/clang/linux-x86/host/3.6/bin/clang -pthread -shared -fuse-ld=gold --gcc-toolchain=/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 --sysroot /buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/sysroot -B/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/bin/x86_64-linux- -m64  -Wl,-rpath,\\$ORIGIN/../lib:\\$ORIGIN/../..',
  'BUILDEXE': '',
  'BUILDPYTHON': 'python',
- 'CC': 'clang-3.5 -pthread',
+ 'CC': '/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/clang/linux-x86/host/3.6/bin/clang -pthread',
  'CCSHARED': '-fPIC',
- 'CFLAGS': '-fno-strict-aliasing -OPT:Olimit=0 -g -O2 -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes',
+ 'CFLAGS': '-fno-strict-aliasing -fuse-ld=gold --gcc-toolchain=/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 --sysroot /buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/sysroot -B/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/bin/x86_64-linux- -m64  -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes',
  'CFLAGSFORSHARED': '-fPIC',
  'CONFIGFILES': 'configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in',
- 'CONFIG_ARGS': "'--prefix=/tmp/python-chaorenl/install' '--enable-unicode=ucs4' '--enable-shared' 'CC=clang-3.5' 'LDFLAGS= -m64'",
- 'CONFINCLUDEDIR': '/tmp/python-chaorenl/install/include',
- 'CONFINCLUDEPY': '/tmp/python-chaorenl/install/include/python2.7',
+ 'CONFIG_ARGS': "'--prefix=' '--enable-unicode=ucs4' '--enable-shared' 'CC=/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/clang/linux-x86/host/3.6/bin/clang' 'CFLAGS=-fuse-ld=gold --gcc-toolchain=/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 --sysroot /buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/sysroot -B/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/bin/x86_64-linux- -m64 ' 'LDFLAGS=-fuse-ld=gold --gcc-toolchain=/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 --sysroot /buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/sysroot -B/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/bin/x86_64-linux- -m64  -Wl,-rpath,\\$ORIGIN/../lib:\\$ORIGIN/../..'",
+ 'CONFINCLUDEDIR': '/include',
+ 'CONFINCLUDEPY': '/include/python2.7',
  'COREPYTHONPATH': ':plat-linux2:lib-tk:lib-old',
- 'CPPFLAGS': '-I. -IInclude -I/tmp/python-chaorenl/Python-2.7.10/Include',
- 'CXX': 'clang++-3.5 -pthread',
+ 'COVERAGE_INFO': '/buildbot/src/googleplex-android/lldb-master-dev/out/python/build/coverage.info',
+ 'COVERAGE_REPORT': '/buildbot/src/googleplex-android/lldb-master-dev/out/python/build/lcov-report',
+ 'COVERAGE_REPORT_OPTIONS': '--no-branch-coverage --title "CPython lcov report"',
+ 'CPPFLAGS': '-I. -IInclude -I/buildbot/src/googleplex-android/lldb-master-dev/out/python/source/Include',
+ 'CXX': '/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/clang/linux-x86/host/3.6/bin/clang++ -pthread',
  'C_THREADS': 0,
- 'DESTDIRS': '/tmp/python-chaorenl/install /tmp/python-chaorenl/install/lib /tmp/python-chaorenl/install/lib/python2.7 /tmp/python-chaorenl/install/lib/python2.7/lib-dynload',
- 'DESTLIB': '/tmp/python-chaorenl/install/lib/python2.7',
+ 'DESTDIRS': '/lib /lib/python2.7 /lib/python2.7/lib-dynload',
+ 'DESTLIB': '/lib/python2.7',
  'DESTPATH': '',
- 'DESTSHARED': '/tmp/python-chaorenl/install/lib/python2.7/lib-dynload',
+ 'DESTSHARED': '/lib/python2.7/lib-dynload',
  'DIRMODE': 755,
  'DIST': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in Include Lib Misc Demo Ext-dummy',
  'DISTDIRS': 'Include Lib Misc Demo Ext-dummy',
@@ -62,7 +65,7 @@
  'GNULD': 'yes',
  'GRAMMAR_C': 'Python/graminit.c',
  'GRAMMAR_H': 'Include/graminit.h',
- 'GRAMMAR_INPUT': '/tmp/python-chaorenl/Python-2.7.10/Grammar/Grammar',
+ 'GRAMMAR_INPUT': '/buildbot/src/googleplex-android/lldb-master-dev/out/python/source/Grammar/Grammar',
  'HAVE_ACOSH': 1,
  'HAVE_ADDRINFO': 1,
  'HAVE_ALARM': 1,
@@ -87,6 +90,7 @@
  'HAVE_CHOWN': 1,
  'HAVE_CHROOT': 1,
  'HAVE_CLOCK': 1,
+ 'HAVE_COMPUTED_GOTOS': 1,
  'HAVE_CONFSTR': 1,
  'HAVE_CONIO_H': 0,
  'HAVE_COPYSIGN': 1,
@@ -227,7 +231,7 @@
  'HAVE_PTHREAD_SIGMASK': 1,
  'HAVE_PTY_H': 1,
  'HAVE_PUTENV': 1,
- 'HAVE_RAND_EGD': 1,
+ 'HAVE_RAND_EGD': 0,
  'HAVE_READLINK': 1,
  'HAVE_REALPATH': 1,
  'HAVE_RL_CALLBACK': 0,
@@ -287,7 +291,6 @@
  'HAVE_STRUCT_STAT_ST_GEN': 0,
  'HAVE_STRUCT_STAT_ST_RDEV': 1,
  'HAVE_STRUCT_TM_TM_ZONE': 1,
- 'HAVE_ST_BLOCKS': 1,
  'HAVE_SYMLINK': 1,
  'HAVE_SYSCONF': 1,
  'HAVE_SYSEXITS_H': 1,
@@ -350,56 +353,59 @@
  'HAVE_WORKING_TZSET': 1,
  'HAVE_ZLIB_COPY': 1,
  'HAVE__GETPTY': 0,
- 'HGBRANCH': 'hg id -b /tmp/python-chaorenl/Python-2.7.10',
- 'HGTAG': 'hg id -t /tmp/python-chaorenl/Python-2.7.10',
- 'HGVERSION': 'hg id -i /tmp/python-chaorenl/Python-2.7.10',
+ 'HGBRANCH': '',
+ 'HGTAG': '',
+ 'HGVERSION': '',
  'HOST_GNU_TYPE': 'x86_64-unknown-linux-gnu',
  'HURD_C_THREADS': 0,
- 'INCLDIRSTOMAKE': '/tmp/python-chaorenl/install/include /tmp/python-chaorenl/install/include /tmp/python-chaorenl/install/include/python2.7 /tmp/python-chaorenl/install/include/python2.7',
- 'INCLUDEDIR': '/tmp/python-chaorenl/install/include',
- 'INCLUDEPY': '/tmp/python-chaorenl/install/include/python2.7',
+ 'INCLDIRSTOMAKE': '/include /include /include/python2.7 /include/python2.7',
+ 'INCLUDEDIR': '/include',
+ 'INCLUDEPY': '/include/python2.7',
  'INSTALL': '/usr/bin/install -c',
  'INSTALL_DATA': '/usr/bin/install -c -m 644',
  'INSTALL_PROGRAM': '/usr/bin/install -c',
  'INSTALL_SCRIPT': '/usr/bin/install -c',
  'INSTALL_SHARED': '/usr/bin/install -c -m 555',
  'INSTSONAME': 'libpython2.7.so.1.0',
- 'LDCXXSHARED': 'clang++-3.5 -pthread -shared',
- 'LDFLAGS': '-m64',
+ 'LDCXXSHARED': '/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/clang/linux-x86/host/3.6/bin/clang++ -pthread -shared',
+ 'LDFLAGS': '-fuse-ld=gold --gcc-toolchain=/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 --sysroot /buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/sysroot -B/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/bin/x86_64-linux- -m64  -Wl,-rpath,\\$ORIGIN/../lib:\\$ORIGIN/../..',
  'LDLAST': '',
  'LDLIBRARY': 'libpython2.7.so',
  'LDLIBRARYDIR': '',
- 'LDSHARED': 'clang-3.5 -pthread -shared -m64',
+ 'LDSHARED': '/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/clang/linux-x86/host/3.6/bin/clang -pthread -shared -fuse-ld=gold --gcc-toolchain=/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 --sysroot /buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/sysroot -B/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/bin/x86_64-linux- -m64  -Wl,-rpath,\\$ORIGIN/../lib:\\$ORIGIN/../..',
  'LIBC': '',
- 'LIBDEST': '/tmp/python-chaorenl/install/lib/python2.7',
- 'LIBDIR': '/tmp/python-chaorenl/install/lib',
+ 'LIBDEST': '/lib/python2.7',
+ 'LIBDIR': '/lib',
  'LIBFFI_INCLUDEDIR': '',
  'LIBM': '-lm',
  'LIBOBJDIR': 'Python/',
  'LIBOBJS': '',
- 'LIBP': '/tmp/python-chaorenl/install/lib/python2.7',
- 'LIBPC': '/tmp/python-chaorenl/install/lib/pkgconfig',
- 'LIBPL': '/tmp/python-chaorenl/install/lib/python2.7/config',
+ 'LIBP': '/lib/python2.7',
+ 'LIBPC': '/lib/pkgconfig',
+ 'LIBPL': '/lib/python2.7/config',
  'LIBRARY': 'libpython2.7.a',
  'LIBRARY_OBJS': '\\',
  'LIBS': '-lpthread -ldl  -lutil',
  'LIBSUBDIRS': 'lib-tk lib-tk/test lib-tk/test/test_tkinter \\',
- 'LINKCC': 'clang-3.5 -pthread',
+ 'LINKCC': '/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/clang/linux-x86/host/3.6/bin/clang -pthread',
  'LINKFORSHARED': '-Xlinker -export-dynamic',
+ 'LLVM_PROF_ERR': 'yes',
+ 'LLVM_PROF_FILE': 'LLVM_PROFILE_FILE="code-%p.profclangr"',
+ 'LLVM_PROF_MERGER': 'llvm-profdata merge -output=code.profclangd *.profclangr',
  'LN': 'ln',
  'LOCALMODLIBS': '',
  'MACHDEP': 'linux2',
  'MACHDEPPATH': ':plat-linux2',
  'MACHDEPS': 'plat-linux2',
  'MACHDEP_OBJS': '',
- 'MACHDESTLIB': '/tmp/python-chaorenl/install/lib/python2.7',
+ 'MACHDESTLIB': '/lib/python2.7',
  'MACH_C_THREADS': 0,
  'MACOSX_DEPLOYMENT_TARGET': '',
- 'MAINCC': 'clang-3.5 -pthread',
+ 'MAINCC': '/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/clang/linux-x86/host/3.6/bin/clang -pthread',
  'MAJOR_IN_MKDEV': 0,
  'MAJOR_IN_SYSMACROS': 0,
- 'MAKESETUP': '/tmp/python-chaorenl/Python-2.7.10/Modules/makesetup',
- 'MANDIR': '/tmp/python-chaorenl/install/share/man',
+ 'MAKESETUP': '/buildbot/src/googleplex-android/lldb-master-dev/out/python/source/Modules/makesetup',
+ 'MANDIR': '/share/man',
  'MEMTESTOPTS': '-l -x test_subprocess test_io test_lib2to3 \\ -x test_dl test___all__ test_fork1 \\',
  'MKDIR_P': '/bin/mkdir -p',
  'MODLIBS': '',
@@ -409,6 +415,9 @@
  'MVWDELCH_IS_EXPRESSION': 1,
  'OBJECT_OBJS': '\\',
  'OLDPATH': ':lib-old',
+ 'OPCODETARGETGEN': '\\',
+ 'OPCODETARGETGEN_FILES': '\\',
+ 'OPCODETARGETS_H': '\\',
  'OPT': '-DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes',
  'OTHER_LIBTOOL_OPT': '',
  'PACKAGE_BUGREPORT': 0,
@@ -423,13 +432,15 @@
  'PGENOBJS': '\\ \\',
  'PGENSRCS': '\\ \\',
  'PGOBJS': '\\',
+ 'PGO_PROF_GEN_FLAG': '-fprofile-instr-generate',
+ 'PGO_PROF_USE_FLAG': '-fprofile-instr-use=code.profclangd',
  'PGSRCS': '\\',
  'PLATDIR': 'plat-linux2',
  'PLATMACDIRS': 'plat-mac plat-mac/Carbon plat-mac/lib-scriptpackages \\',
  'PLATMACPATH': ':plat-mac:plat-mac/lib-scriptpackages',
  'POBJS': '\\',
  'POSIX_SEMAPHORES_NOT_ENABLED': 0,
- 'PROFILE_TASK': '/tmp/python-chaorenl/Python-2.7.10/Tools/pybench/pybench.py -n 2 --with-gc --with-syscheck',
+ 'PROFILE_TASK': '-m test.regrtest --pgo',
  'PSRCS': '\\',
  'PTHREAD_SYSTEM_SCHED_SUPPORTED': 1,
  'PURIFY': '',
@@ -443,7 +454,7 @@
  'PYTHON_FOR_BUILD': './python -E',
  'PYTHON_HEADERS': '\\',
  'PYTHON_OBJS': '\\',
- 'PY_CFLAGS': '-fno-strict-aliasing -OPT:Olimit=0 -g -O2 -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -I. -IInclude -I/tmp/python-chaorenl/Python-2.7.10/Include -fPIC -DPy_BUILD_CORE',
+ 'PY_CFLAGS': '-fno-strict-aliasing -fuse-ld=gold --gcc-toolchain=/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 --sysroot /buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/sysroot -B/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/bin/x86_64-linux- -m64  -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -I. -IInclude -I/buildbot/src/googleplex-android/lldb-master-dev/out/python/source/Include -fPIC -DPy_BUILD_CORE',
  'PY_FORMAT_LONG_LONG': '"ll"',
  'PY_FORMAT_SIZE_T': '"z"',
  'PY_UNICODE_TYPE': 0,
@@ -455,8 +466,8 @@
  'RANLIB': 'ranlib',
  'RESSRCDIR': 'Mac/Resources/framework',
  'RETSIGTYPE': 'void',
- 'RUNSHARED': 'LD_LIBRARY_PATH=/tmp/python-chaorenl/build',
- 'SCRIPTDIR': '/tmp/python-chaorenl/install/lib',
+ 'RUNSHARED': 'LD_LIBRARY_PATH=/buildbot/src/googleplex-android/lldb-master-dev/out/python/build',
+ 'SCRIPTDIR': '/lib',
  'SETPGRP_HAVE_ARG': 0,
  'SGI_ABI': '',
  'SHELL': '/bin/sh',
@@ -484,13 +495,13 @@
  'SIZEOF__BOOL': 1,
  'SO': '.so',
  'SRCDIRS': 'Parser Grammar Objects Python Modules Mac',
- 'SRC_GDB_HOOKS': '/tmp/python-chaorenl/Python-2.7.10/Tools/gdb/libpython.py',
+ 'SRC_GDB_HOOKS': '/buildbot/src/googleplex-android/lldb-master-dev/out/python/source/Tools/gdb/libpython.py',
  'STDC_HEADERS': 1,
  'STRICT_SYSV_CURSES': "/* Don't use ncurses extensions */",
  'STRINGLIB_HEADERS': '\\',
  'SUBDIRS': '',
  'SUBDIRSTOO': 'Include Lib Misc Demo',
- 'SVNVERSION': 'svnversion /tmp/python-chaorenl/Python-2.7.10',
+ 'SVNVERSION': 'echo Unversioned directory',
  'SYSLIBS': '-lm',
  'SYS_SELECT_WITH_SYS_TIME': 1,
  'TANH_PRESERVES_ZERO_SIGN': 1,
@@ -498,8 +509,8 @@
  'TCLTK_LIBS': '',
  'TESTOPTS': '-l',
  'TESTPATH': '',
- 'TESTPROG': '/tmp/python-chaorenl/Python-2.7.10/Lib/test/regrtest.py',
- 'TESTPYTHON': 'LD_LIBRARY_PATH=/tmp/python-chaorenl/build ./python -Wd -3 -E -tt',
+ 'TESTPROG': '/buildbot/src/googleplex-android/lldb-master-dev/out/python/source/Lib/test/regrtest.py',
+ 'TESTPYTHON': 'LD_LIBRARY_PATH=/buildbot/src/googleplex-android/lldb-master-dev/out/python/build ./python -Wd -3 -E -tt',
  'TESTPYTHONOPTS': '',
  'THREADOBJ': 'Python/thread.o',
  'TIME_WITH_SYS_TIME': 1,
@@ -507,10 +518,11 @@
  'TM_IN_SYS_TIME': 0,
  'UNICODE_OBJS': 'Objects/unicodeobject.o Objects/unicodectype.o',
  'UNIVERSALSDK': '',
+ 'USE_COMPUTED_GOTOS': 0,
  'USE_TOOLBOX_OBJECT_GLUE': 0,
  'VA_LIST_IS_ARRAY': 1,
  'VERSION': '2.7',
- 'VPATH': '/tmp/python-chaorenl/Python-2.7.10',
+ 'VPATH': '/buildbot/src/googleplex-android/lldb-master-dev/out/python/source',
  'WANT_SIGFPE_HANDLER': 0,
  'WANT_WCTYPE_FUNCTIONS': 0,
  'WINDOW_HAS_FLAGS': 1,
@@ -524,11 +536,11 @@
  'WITH_VALGRIND': 0,
  'X87_DOUBLE_ROUNDING': 0,
  'XMLLIBSUBDIRS': 'xml xml/dom xml/etree xml/parsers xml/sax',
- 'abs_builddir': '/tmp/python-chaorenl/build',
- 'abs_srcdir': '/tmp/python-chaorenl/Python-2.7.10',
+ 'abs_builddir': '/buildbot/src/googleplex-android/lldb-master-dev/out/python/build',
+ 'abs_srcdir': '/buildbot/src/googleplex-android/lldb-master-dev/out/python/source',
  'build': 'x86_64-unknown-linux-gnu',
- 'datarootdir': '/tmp/python-chaorenl/install/share',
- 'exec_prefix': '/tmp/python-chaorenl/install',
+ 'datarootdir': '/share',
+ 'exec_prefix': '',
  'host': 'x86_64-unknown-linux-gnu',
- 'prefix': '/tmp/python-chaorenl/install',
- 'srcdir': '/tmp/python-chaorenl/Python-2.7.10'}
+ 'prefix': '',
+ 'srcdir': '/buildbot/src/googleplex-android/lldb-master-dev/out/python/source'}
diff --git a/lib/python2.7/base64.py b/lib/python2.7/base64.py
index 85204dd..38bc61e 100755
--- a/lib/python2.7/base64.py
+++ b/lib/python2.7/base64.py
@@ -7,6 +7,7 @@
 
 import re
 import struct
+import string
 import binascii
 
 
@@ -52,7 +53,7 @@
     # Strip off the trailing newline
     encoded = binascii.b2a_base64(s)[:-1]
     if altchars is not None:
-        return _translate(encoded, {'+': altchars[0], '/': altchars[1]})
+        return encoded.translate(string.maketrans(b'+/', altchars[:2]))
     return encoded
 
 
@@ -63,12 +64,13 @@
     length 2 (additional characters are ignored) which specifies the
     alternative alphabet used instead of the '+' and '/' characters.
 
-    The decoded string is returned.  A TypeError is raised if s were
-    incorrectly padded or if there are non-alphabet characters present in the
-    string.
+    The decoded string is returned.  A TypeError is raised if s is
+    incorrectly padded.  Characters that are neither in the normal base-64
+    alphabet nor the alternative alphabet are discarded prior to the padding
+    check.
     """
     if altchars is not None:
-        s = _translate(s, {altchars[0]: '+', altchars[1]: '/'})
+        s = s.translate(string.maketrans(altchars[:2], '+/'))
     try:
         return binascii.a2b_base64(s)
     except binascii.Error, msg:
@@ -86,30 +88,35 @@
 def standard_b64decode(s):
     """Decode a string encoded with the standard Base64 alphabet.
 
-    s is the string to decode.  The decoded string is returned.  A TypeError
-    is raised if the string is incorrectly padded or if there are non-alphabet
-    characters present in the string.
+    Argument s is the string to decode.  The decoded string is returned.  A
+    TypeError is raised if the string is incorrectly padded.  Characters that
+    are not in the standard alphabet are discarded prior to the padding
+    check.
     """
     return b64decode(s)
 
-def urlsafe_b64encode(s):
-    """Encode a string using a url-safe Base64 alphabet.
+_urlsafe_encode_translation = string.maketrans(b'+/', b'-_')
+_urlsafe_decode_translation = string.maketrans(b'-_', b'+/')
 
-    s is the string to encode.  The encoded string is returned.  The alphabet
-    uses '-' instead of '+' and '_' instead of '/'.
+def urlsafe_b64encode(s):
+    """Encode a string using the URL- and filesystem-safe Base64 alphabet.
+
+    Argument s is the string to encode.  The encoded string is returned.  The
+    alphabet uses '-' instead of '+' and '_' instead of '/'.
     """
-    return b64encode(s, '-_')
+    return b64encode(s).translate(_urlsafe_encode_translation)
 
 def urlsafe_b64decode(s):
-    """Decode a string encoded with the standard Base64 alphabet.
+    """Decode a string using the URL- and filesystem-safe Base64 alphabet.
 
-    s is the string to decode.  The decoded string is returned.  A TypeError
-    is raised if the string is incorrectly padded or if there are non-alphabet
-    characters present in the string.
+    Argument s is the string to decode.  The decoded string is returned.  A
+    TypeError is raised if the string is incorrectly padded.  Characters that
+    are not in the URL-safe base-64 alphabet, and are not a plus '+' or slash
+    '/', are discarded prior to the padding check.
 
     The alphabet uses '-' instead of '+' and '_' instead of '/'.
     """
-    return b64decode(s, '-_')
+    return b64decode(s.translate(_urlsafe_decode_translation))
 
 
 
@@ -200,7 +207,7 @@
     # False, or the character to map the digit 1 (one) to.  It should be
     # either L (el) or I (eye).
     if map01:
-        s = _translate(s, {'0': 'O', '1': map01})
+        s = s.translate(string.maketrans(b'01', b'O' + map01))
     if casefold:
         s = s.upper()
     # Strip off pad characters from the right.  We need to count the pad
@@ -263,7 +270,7 @@
     a lowercase alphabet is acceptable as input.  For security purposes, the
     default is False.
 
-    The decoded string is returned.  A TypeError is raised if s were
+    The decoded string is returned.  A TypeError is raised if s is
     incorrectly padded or if there are non-alphabet characters present in the
     string.
     """
diff --git a/lib/python2.7/codecs.py b/lib/python2.7/codecs.py
index 049a3f0..3d9be35 100644
--- a/lib/python2.7/codecs.py
+++ b/lib/python2.7/codecs.py
@@ -79,9 +79,19 @@
 ### Codec base classes (defining the API)
 
 class CodecInfo(tuple):
+    """Codec details when looking up the codec registry"""
+
+    # Private API to allow Python to blacklist the known non-Unicode
+    # codecs in the standard library. A more general mechanism to
+    # reliably distinguish test encodings from other codecs will hopefully
+    # be defined for Python 3.5
+    #
+    # See http://bugs.python.org/issue19619
+    _is_text_encoding = True # Assume codecs are text encodings by default
 
     def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
-        incrementalencoder=None, incrementaldecoder=None, name=None):
+        incrementalencoder=None, incrementaldecoder=None, name=None,
+        _is_text_encoding=None):
         self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
         self.name = name
         self.encode = encode
@@ -90,6 +100,8 @@
         self.incrementaldecoder = incrementaldecoder
         self.streamwriter = streamwriter
         self.streamreader = streamreader
+        if _is_text_encoding is not None:
+            self._is_text_encoding = _is_text_encoding
         return self
 
     def __repr__(self):
@@ -126,8 +138,8 @@
             'strict' handling.
 
             The method may not store state in the Codec instance. Use
-            StreamCodec for codecs which have to keep state in order to
-            make encoding/decoding efficient.
+            StreamWriter for codecs which have to keep state in order to
+            make encoding efficient.
 
             The encoder must be able to handle zero length input and
             return an empty object of the output object type in this
@@ -149,8 +161,8 @@
             'strict' handling.
 
             The method may not store state in the Codec instance. Use
-            StreamCodec for codecs which have to keep state in order to
-            make encoding/decoding efficient.
+            StreamReader for codecs which have to keep state in order to
+            make decoding efficient.
 
             The decoder must be able to handle zero length input and
             return an empty object of the output object type in this
diff --git a/lib/python2.7/collections.py b/lib/python2.7/collections.py
index 1dcd233..7ecfd46 100644
--- a/lib/python2.7/collections.py
+++ b/lib/python2.7/collections.py
@@ -1,3 +1,15 @@
+'''This module implements specialized container datatypes providing
+alternatives to Python's general purpose built-in containers, dict,
+list, set, and tuple.
+
+* namedtuple   factory function for creating tuple subclasses with named fields
+* deque        list-like container with fast appends and pops on either end
+* Counter      dict subclass for counting hashable objects
+* OrderedDict  dict subclass that remembers the order entries were added
+* defaultdict  dict subclass that calls a factory function to supply missing values
+
+'''
+
 __all__ = ['Counter', 'deque', 'defaultdict', 'namedtuple', 'OrderedDict']
 # For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
 # They should however be considered an integral part of collections.py.
diff --git a/lib/python2.7/config/Makefile b/lib/python2.7/config/Makefile
index 26bdeaf..ffeb2f7 100644
--- a/lib/python2.7/config/Makefile
+++ b/lib/python2.7/config/Makefile
@@ -26,23 +26,28 @@
 
 # === Variables set by configure
 VERSION=	2.7
-srcdir=		/tmp/python-chaorenl/Python-2.7.10
-VPATH=		/tmp/python-chaorenl/Python-2.7.10
-abs_srcdir=	/tmp/python-chaorenl/Python-2.7.10
-abs_builddir=	/tmp/python-chaorenl/build
+srcdir=		/buildbot/src/googleplex-android/lldb-master-dev/out/python/source
+VPATH=		/buildbot/src/googleplex-android/lldb-master-dev/out/python/source
+abs_srcdir=	/buildbot/src/googleplex-android/lldb-master-dev/out/python/source
+abs_builddir=	/buildbot/src/googleplex-android/lldb-master-dev/out/python/build
 build=		x86_64-unknown-linux-gnu
 host=		x86_64-unknown-linux-gnu
 
-CC=		clang-3.5 -pthread
-CXX=		clang++-3.5 -pthread
+CC=		/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/clang/linux-x86/host/3.6/bin/clang -pthread
+CXX=		/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/clang/linux-x86/host/3.6/bin/clang++ -pthread
 MAINCC=		$(CC)
 LINKCC=		$(PURIFY) $(MAINCC)
 AR=		ar
 RANLIB=		ranlib
-SVNVERSION=	svnversion $(srcdir)
-HGVERSION=	hg id -i $(srcdir)
-HGTAG=		hg id -t $(srcdir)
-HGBRANCH=	hg id -b $(srcdir)
+SVNVERSION=	echo Unversioned directory
+HGVERSION=	
+HGTAG=		
+HGBRANCH=	
+PGO_PROF_GEN_FLAG=-fprofile-instr-generate
+PGO_PROF_USE_FLAG=-fprofile-instr-use=code.profclangd
+LLVM_PROF_MERGER=llvm-profdata merge -output=code.profclangd *.profclangr
+LLVM_PROF_FILE=LLVM_PROFILE_FILE="code-%p.profclangr"
+LLVM_PROF_ERR=yes
 
 GNULD=          yes
 
@@ -68,13 +73,13 @@
 
 # Compiler options
 OPT=		-DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes
-BASECFLAGS=	 -fno-strict-aliasing -OPT:Olimit=0
-CFLAGS=		$(BASECFLAGS) -g -O2 $(OPT) $(EXTRA_CFLAGS)
+BASECFLAGS=	 -fno-strict-aliasing
+CFLAGS=		$(BASECFLAGS) -fuse-ld=gold --gcc-toolchain=/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 --sysroot /buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/sysroot -B/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/bin/x86_64-linux- -m64  $(OPT) $(EXTRA_CFLAGS)
 # Both CPPFLAGS and LDFLAGS need to contain the shell's value for setup.py to
 # be able to build extension modules using the directories specified in the
 # environment variables
 CPPFLAGS=	-I. -IInclude -I$(srcdir)/Include 
-LDFLAGS=	 -m64
+LDFLAGS=	-fuse-ld=gold --gcc-toolchain=/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 --sysroot /buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/sysroot -B/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/bin/x86_64-linux- -m64  -Wl,-rpath,\$$ORIGIN/../lib:\$$ORIGIN/../..
 LDLAST=		
 SGI_ABI=	
 CCSHARED=	-fPIC
@@ -93,7 +98,7 @@
 MULTIARCH=	
 
 # Install prefix for architecture-independent files
-prefix=		/tmp/python-chaorenl/install
+prefix=		
 
 # Install prefix for architecture-dependent files
 exec_prefix=	${prefix}
@@ -143,7 +148,7 @@
 OTHER_LIBTOOL_OPT=
 
 # Environment to run shared python without installed libraries
-RUNSHARED=       LD_LIBRARY_PATH=/tmp/python-chaorenl/build
+RUNSHARED=       LD_LIBRARY_PATH=/buildbot/src/googleplex-android/lldb-master-dev/out/python/build
 
 # ensurepip options
 ENSUREPIP=      no
@@ -155,7 +160,7 @@
 FILEMODE=	644
 
 # configure script arguments
-CONFIG_ARGS=	 '--prefix=/tmp/python-chaorenl/install' '--enable-unicode=ucs4' '--enable-shared' 'CC=clang-3.5' 'LDFLAGS= -m64'
+CONFIG_ARGS=	 '--prefix=' '--enable-unicode=ucs4' '--enable-shared' 'CC=/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/clang/linux-x86/host/3.6/bin/clang' 'CFLAGS=-fuse-ld=gold --gcc-toolchain=/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 --sysroot /buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/sysroot -B/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/bin/x86_64-linux- -m64 ' 'LDFLAGS=-fuse-ld=gold --gcc-toolchain=/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 --sysroot /buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/sysroot -B/buildbot/src/googleplex-android/lldb-master-dev/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/bin/x86_64-linux- -m64  -Wl,-rpath,\$$ORIGIN/../lib:\$$ORIGIN/../..'
 
 
 # Subdirectories with code
@@ -205,8 +210,12 @@
 TCLTK_LIBS=	
 
 # The task to run while instrument when building the profile-opt target
-PROFILE_TASK=	$(srcdir)/Tools/pybench/pybench.py -n 2 --with-gc --with-syscheck
-#PROFILE_TASK=	$(srcdir)/Lib/test/regrtest.py
+PROFILE_TASK=-m test.regrtest --pgo
+
+# report files for gcov / lcov coverage report
+COVERAGE_INFO=	$(abs_builddir)/coverage.info
+COVERAGE_REPORT=$(abs_builddir)/lcov-report
+COVERAGE_REPORT_OPTIONS=--no-branch-coverage --title "CPython lcov report"
 
 # === Definitions added by makesetup ===
 
@@ -317,6 +326,16 @@
 
 ##########################################################################
 # Python
+
+OPCODETARGETS_H= \
+		$(srcdir)/Python/opcode_targets.h
+		
+OPCODETARGETGEN= \
+		$(srcdir)/Python/makeopcodetargets.py
+
+OPCODETARGETGEN_FILES= \
+		$(OPCODETARGETGEN) $(srcdir)/Lib/opcode.py
+
 PYTHON_OBJS=	\
 		Python/_warnings.o \
 		Python/Python-ast.o \
@@ -427,33 +446,79 @@
 all:		build_all
 build_all:	$(BUILDPYTHON) oldsharedmods sharedmods gdbhooks
 
-# Compile a binary with gcc profile guided optimization.
+# Compile a binary with profile guided optimization.
 profile-opt:
+	@if [ $(LLVM_PROF_ERR) == yes ]; then \
+		echo "Error: Cannot perform PGO build because llvm-profdata was not found in PATH" ;\
+		echo "Please add it to PATH and run ./configure again" ;\
+		exit 1;\
+	fi
 	@echo "Building with support for profile generation:"
 	$(MAKE) clean
-	$(MAKE) build_all_generate_profile
-	@echo "Running benchmark to generate profile data:"
 	$(MAKE) profile-removal
+	$(MAKE) build_all_generate_profile
+	$(MAKE) profile-removal
+	@echo "Running code to generate profile data (this can take a while):"
 	$(MAKE) run_profile_task
+	$(MAKE) build_all_merge_profile
 	@echo "Rebuilding with profile guided optimizations:"
 	$(MAKE) clean
 	$(MAKE) build_all_use_profile
+	$(MAKE) profile-removal
 
 build_all_generate_profile:
-	$(MAKE) all CFLAGS="$(CFLAGS) -fprofile-generate" LIBS="$(LIBS) -lgcov"
+	$(MAKE) all CFLAGS="$(CFLAGS) $(PGO_PROF_GEN_FLAG)" LDFLAGS="$(LDFLAGS) $(PGO_PROF_GEN_FLAG)" LIBS="$(LIBS)"
 
 run_profile_task:
 	: # FIXME: can't run for a cross build
-	./$(BUILDPYTHON) $(PROFILE_TASK)
+	$(LLVM_PROF_FILE) ./$(BUILDPYTHON) $(PROFILE_TASK) || true
+
+build_all_merge_profile:
+	$(LLVM_PROF_MERGER)
 
 build_all_use_profile:
-	$(MAKE) all CFLAGS="$(CFLAGS) -fprofile-use"
+	$(MAKE) all CFLAGS="$(CFLAGS) $(PGO_PROF_USE_FLAG)"
 
+# Compile and run with gcov
+.PHONY=coverage coverage-lcov coverage-report
 coverage:
 	@echo "Building with support for coverage checking:"
-	$(MAKE) clean
+	$(MAKE) clean profile-removal
 	$(MAKE) all CFLAGS="$(CFLAGS) -O0 -pg -fprofile-arcs -ftest-coverage" LIBS="$(LIBS) -lgcov"
 
+coverage-lcov:
+	@echo "Creating Coverage HTML report with LCOV:"
+	@rm -f $(COVERAGE_INFO)
+	@rm -rf $(COVERAGE_REPORT)
+	@lcov --capture --directory $(abs_builddir) \
+	    --base-directory $(realpath $(abs_builddir)) \
+	    --path $(realpath $(abs_srcdir)) \
+	    --output-file $(COVERAGE_INFO)
+	: # remove 3rd party modules and system headers
+	@lcov --remove $(COVERAGE_INFO) \
+	    '*/Modules/_ctypes/libffi*/*' \
+	    '*/Modules/expat/*' \
+	    '*/Modules/zlib/*' \
+	    '*/Include/*' \
+	    '/usr/include/*' \
+	    '/usr/local/include/*' \
+	    --output-file $(COVERAGE_INFO)
+	@genhtml $(COVERAGE_INFO) --output-directory $(COVERAGE_REPORT) \
+	    $(COVERAGE_REPORT_OPTIONS)
+	@echo
+	@echo "lcov report at $(COVERAGE_REPORT)/index.html"
+	@echo
+
+coverage-report:
+	: # force rebuilding of parser
+	@touch $(GRAMMAR_INPUT)
+	: # build with coverage info
+	$(MAKE) coverage
+	: # run tests, ignore failures
+	$(TESTPYTHON) $(TESTPROG) $(TESTOPTS) || true
+	: # build lcov report
+	$(MAKE) coverage-lcov
+
 
 # Build the interpreter
 $(BUILDPYTHON):	Modules/python.o $(LIBRARY) $(LDLIBRARY)
@@ -480,11 +545,15 @@
 		exit 1 ; \
 	fi
 
+# This is shared by the math and cmath modules
+Modules/_math.o: Modules/_math.c Modules/_math.h
+	$(CC) -c $(CCSHARED) $(PY_CFLAGS) -o $@ $<
+
 # Build the shared modules
 # Under GNU make, MAKEFLAGS are sorted and normalized; the 's' for
 # -s, --silent or --quiet is always the first char.
 # Under BSD make, MAKEFLAGS might be " -s -v x=y".
-sharedmods: $(BUILDPYTHON) pybuilddir.txt
+sharedmods: $(BUILDPYTHON) pybuilddir.txt Modules/_math.o
 	@case "$$MAKEFLAGS" in \
 	    *\ -s*|s*) quiet="-q";; \
 	    *) quiet="";; \
@@ -688,6 +757,11 @@
 Objects/stringobject.o: $(srcdir)/Objects/stringobject.c \
 				$(STRINGLIB_HEADERS)
 
+$(OPCODETARGETS_H): $(OPCODETARGETGEN_FILES)
+	$(OPCODETARGETGEN) $(OPCODETARGETS_H)
+
+Python/ceval.o: $(OPCODETARGETS_H)
+
 Python/formatter_unicode.o: $(srcdir)/Python/formatter_unicode.c \
 				$(STRINGLIB_HEADERS)
 
@@ -901,6 +975,10 @@
 #  $(PYTHON) -> python2 -> python$(VERSION))
 # Also create equivalent chains for other installed files
 bininstall:	altbininstall
+	if test ! -d $(DESTDIR)$(LIBPC); then \
+		echo "Creating directory $(LIBPC)"; \
+		$(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$(LIBPC); \
+	fi
 	-if test -f $(DESTDIR)$(BINDIR)/$(PYTHON) -o -h $(DESTDIR)$(BINDIR)/$(PYTHON); \
 	then rm -f $(DESTDIR)$(BINDIR)/$(PYTHON); \
 	else true; \
@@ -1332,9 +1410,12 @@
 	find build -name 'fficonfig.h' -exec rm -f {} ';' || true
 	find build -name 'fficonfig.py' -exec rm -f {} ';' || true
 	-rm -f Lib/lib2to3/*Grammar*.pickle
+	-rm -rf build
 
 profile-removal:
 	find . -name '*.gc??' -exec rm -f {} ';'
+	find . -name '*.profclang?' -exec rm -f {} ';'
+	find . -name '*.dyn' -exec rm -f {} ';'
 
 clobber: clean profile-removal
 	-rm -f $(BUILDPYTHON) $(PGEN) $(LIBRARY) $(LDLIBRARY) $(DLLLIBRARY) \
diff --git a/lib/python2.7/config/config.c b/lib/python2.7/config/config.c
index 7634b89..b1fa555 100644
--- a/lib/python2.7/config/config.c
+++ b/lib/python2.7/config/config.c
@@ -1,4 +1,4 @@
-/* Generated automatically from /tmp/python-chaorenl/Python-2.7.10/Modules/config.c.in by makesetup. */
+/* Generated automatically from /buildbot/src/googleplex-android/lldb-master-dev/out/python/source/Modules/config.c.in by makesetup. */
 /* -*- C -*- ***********************************************
 Copyright (c) 2000, BeOpen.com.
 Copyright (c) 1995-2000, Corporation for National Research Initiatives.
diff --git a/lib/python2.7/config/libpython2.7.a b/lib/python2.7/config/libpython2.7.a
index e606690..5656f97 100644
--- a/lib/python2.7/config/libpython2.7.a
+++ b/lib/python2.7/config/libpython2.7.a
Binary files differ
diff --git a/lib/python2.7/config/python.o b/lib/python2.7/config/python.o
index 717af56..f4e79e2 100644
--- a/lib/python2.7/config/python.o
+++ b/lib/python2.7/config/python.o
Binary files differ
diff --git a/lib/python2.7/cookielib.py b/lib/python2.7/cookielib.py
index f2df467..eb9aec4 100644
--- a/lib/python2.7/cookielib.py
+++ b/lib/python2.7/cookielib.py
@@ -1434,7 +1434,7 @@
                         break
                     # convert RFC 2965 Max-Age to seconds since epoch
                     # XXX Strictly you're supposed to follow RFC 2616
-                    #   age-calculation rules.  Remember that zero Max-Age is a
+                    #   age-calculation rules.  Remember that zero Max-Age
                     #   is a request to discard (old and new) cookie, though.
                     k = "expires"
                     v = self._now + v
diff --git a/lib/python2.7/copy.py b/lib/python2.7/copy.py
index c227a2e..daf81a3 100644
--- a/lib/python2.7/copy.py
+++ b/lib/python2.7/copy.py
@@ -315,7 +315,7 @@
     if n > 2:
         state = info[2]
     else:
-        state = {}
+        state = None
     if n > 3:
         listiter = info[3]
     else:
@@ -329,7 +329,7 @@
     y = callable(*args)
     memo[id(x)] = y
 
-    if state:
+    if state is not None:
         if deep:
             state = deepcopy(state, memo)
         if hasattr(y, '__setstate__'):
diff --git a/lib/python2.7/ctypes/__init__.py b/lib/python2.7/ctypes/__init__.py
index 4e97c15..77b020a 100644
--- a/lib/python2.7/ctypes/__init__.py
+++ b/lib/python2.7/ctypes/__init__.py
@@ -386,8 +386,8 @@
         return func
 
 class PyDLL(CDLL):
-    """This class represents the Python library itself.  It allows to
-    access Python API functions.  The GIL is not released, and
+    """This class represents the Python library itself.  It allows
+    accessing Python API functions.  The GIL is not released, and
     Python exceptions are handled correctly.
     """
     _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
diff --git a/lib/python2.7/ctypes/test/test_arrays.py b/lib/python2.7/ctypes/test/test_arrays.py
index 47e95ee..49aaab5 100644
--- a/lib/python2.7/ctypes/test/test_arrays.py
+++ b/lib/python2.7/ctypes/test/test_arrays.py
@@ -24,20 +24,24 @@
             self.assertEqual(len(ia), alen)
 
             # slot values ok?
-            values = [ia[i] for i in range(len(init))]
+            values = [ia[i] for i in range(alen)]
             self.assertEqual(values, init)
 
+            # out-of-bounds accesses should be caught
+            with self.assertRaises(IndexError): ia[alen]
+            with self.assertRaises(IndexError): ia[-alen-1]
+
             # change the items
             from operator import setitem
             new_values = range(42, 42+alen)
             [setitem(ia, n, new_values[n]) for n in range(alen)]
-            values = [ia[i] for i in range(len(init))]
+            values = [ia[i] for i in range(alen)]
             self.assertEqual(values, new_values)
 
             # are the items initialized to 0?
             ia = int_array()
-            values = [ia[i] for i in range(len(init))]
-            self.assertEqual(values, [0] * len(init))
+            values = [ia[i] for i in range(alen)]
+            self.assertEqual(values, [0] * alen)
 
             # Too many initializers should be caught
             self.assertRaises(IndexError, int_array, *range(alen*2))
diff --git a/lib/python2.7/ctypes/test/test_bitfields.py b/lib/python2.7/ctypes/test/test_bitfields.py
index 991dbe8..a854d2b 100644
--- a/lib/python2.7/ctypes/test/test_bitfields.py
+++ b/lib/python2.7/ctypes/test/test_bitfields.py
@@ -259,5 +259,33 @@
         x.a = 0xFEDCBA9876543211
         self.assertEqual(x.a, 0xFEDCBA9876543211)
 
+    @need_symbol('c_uint32')
+    def test_uint32_swap_little_endian(self):
+        # Issue #23319
+        class Little(LittleEndianStructure):
+            _fields_ = [("a", c_uint32, 24),
+                        ("b", c_uint32, 4),
+                        ("c", c_uint32, 4)]
+        b = bytearray(4)
+        x = Little.from_buffer(b)
+        x.a = 0xabcdef
+        x.b = 1
+        x.c = 2
+        self.assertEqual(b, b'\xef\xcd\xab\x21')
+
+    @need_symbol('c_uint32')
+    def test_uint32_swap_big_endian(self):
+        # Issue #23319
+        class Big(BigEndianStructure):
+            _fields_ = [("a", c_uint32, 24),
+                        ("b", c_uint32, 4),
+                        ("c", c_uint32, 4)]
+        b = bytearray(4)
+        x = Big.from_buffer(b)
+        x.a = 0xabcdef
+        x.b = 1
+        x.c = 2
+        self.assertEqual(b, b'\xab\xcd\xef\x12')
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/lib/python2.7/ctypes/test/test_pointers.py b/lib/python2.7/ctypes/test/test_pointers.py
index 5cdde68..24b0546 100644
--- a/lib/python2.7/ctypes/test/test_pointers.py
+++ b/lib/python2.7/ctypes/test/test_pointers.py
@@ -53,9 +53,13 @@
         # C code:
         #   int x = 12321;
         #   res = &x
-        res.contents = c_int(12321)
+        x = c_int(12321)
+        res.contents = x
         self.assertEqual(i.value, 54345)
 
+        x.value = -99
+        self.assertEqual(res.contents.value, -99)
+
     def test_callbacks_with_pointers(self):
         # a function type receiving a pointer
         PROTOTYPE = CFUNCTYPE(c_int, POINTER(c_int))
@@ -128,9 +132,10 @@
 
     def test_basic(self):
         p = pointer(c_int(42))
-        # Although a pointer can be indexed, it ha no length
+        # Although a pointer can be indexed, it has no length
         self.assertRaises(TypeError, len, p)
         self.assertEqual(p[0], 42)
+        self.assertEqual(p[0:1], [42])
         self.assertEqual(p.contents.value, 42)
 
     def test_charpp(self):
@@ -192,9 +197,19 @@
         LargeNamedType = type('T' * 2 ** 25, (Structure,), {})
         self.assertTrue(POINTER(LargeNamedType))
 
+        # to not leak references, we must clean _pointer_type_cache
+        from ctypes import _pointer_type_cache
+        del _pointer_type_cache[LargeNamedType]
+
     def test_pointer_type_str_name(self):
         large_string = 'T' * 2 ** 25
-        self.assertTrue(POINTER(large_string))
+        P = POINTER(large_string)
+        self.assertTrue(P)
+
+        # to not leak references, we must clean _pointer_type_cache
+        from ctypes import _pointer_type_cache
+        del _pointer_type_cache[id(P)]
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/lib/python2.7/ctypes/test/test_random_things.py b/lib/python2.7/ctypes/test/test_random_things.py
index 0caffe3..d72b7c8 100644
--- a/lib/python2.7/ctypes/test/test_random_things.py
+++ b/lib/python2.7/ctypes/test/test_random_things.py
@@ -30,7 +30,7 @@
     # value is printed correctly.
     #
     # Changed in 0.9.3: No longer is '(in callback)' prepended to the
-    # error message - instead a additional frame for the C code is
+    # error message - instead an additional frame for the C code is
     # created, then a full traceback printed.  When SystemExit is
     # raised in a callback function, the interpreter exits.
 
diff --git a/lib/python2.7/ctypes/test/test_values.py b/lib/python2.7/ctypes/test/test_values.py
index 14d69fe..fe7dcf0 100644
--- a/lib/python2.7/ctypes/test/test_values.py
+++ b/lib/python2.7/ctypes/test/test_values.py
@@ -22,8 +22,7 @@
         ctdll = CDLL(_ctypes_test.__file__)
         self.assertRaises(ValueError, c_int.in_dll, ctdll, "Undefined_Symbol")
 
-@unittest.skipUnless(sys.platform == 'win32', 'Windows-specific test')
-class Win_ValuesTestCase(unittest.TestCase):
+class PythonValuesTestCase(unittest.TestCase):
     """This test only works when python itself is a dll/shared library"""
 
     def test_optimizeflag(self):
diff --git a/lib/python2.7/ctypes/test/test_win32.py b/lib/python2.7/ctypes/test/test_win32.py
index ff08386..d22e139 100644
--- a/lib/python2.7/ctypes/test/test_win32.py
+++ b/lib/python2.7/ctypes/test/test_win32.py
@@ -114,5 +114,9 @@
             self.assertEqual(ret.top, top.value)
             self.assertEqual(ret.bottom, bottom.value)
 
+        # to not leak references, we must clean _pointer_type_cache
+        from ctypes import _pointer_type_cache
+        del _pointer_type_cache[RECT]
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/lib/python2.7/distutils/__init__.py b/lib/python2.7/distutils/__init__.py
index 0370896..d823d04 100644
--- a/lib/python2.7/distutils/__init__.py
+++ b/lib/python2.7/distutils/__init__.py
@@ -8,12 +8,6 @@
    setup (...)
 """
 
-__revision__ = "$Id$"
+import sys
 
-# Distutils version
-#
-# Updated automatically by the Python release process.
-#
-#--start constants--
-__version__ = "2.7.10"
-#--end constants--
+__version__ = sys.version[:sys.version.index(' ')]
diff --git a/lib/python2.7/distutils/ccompiler.py b/lib/python2.7/distutils/ccompiler.py
index 4907a0a..62506a6 100644
--- a/lib/python2.7/distutils/ccompiler.py
+++ b/lib/python2.7/distutils/ccompiler.py
@@ -718,7 +718,7 @@
         raise NotImplementedError
 
     def library_option(self, lib):
-        """Return the compiler option to add 'dir' to the list of libraries
+        """Return the compiler option to add 'lib' to the list of libraries
         linked into the shared library or executable.
         """
         raise NotImplementedError
@@ -842,8 +842,9 @@
     def library_filename(self, libname, lib_type='static',     # or 'shared'
                          strip_dir=0, output_dir=''):
         assert output_dir is not None
-        if lib_type not in ("static", "shared", "dylib"):
-            raise ValueError, "'lib_type' must be \"static\", \"shared\" or \"dylib\""
+        if lib_type not in ("static", "shared", "dylib", "xcode_stub"):
+            raise ValueError, ("""'lib_type' must be "static", "shared", """
+                               """"dylib", or "xcode_stub".""")
         fmt = getattr(self, lib_type + "_lib_format")
         ext = getattr(self, lib_type + "_lib_extension")
 
diff --git a/lib/python2.7/distutils/command/build_ext.py b/lib/python2.7/distutils/command/build_ext.py
index 2ab73aa..3a49454 100644
--- a/lib/python2.7/distutils/command/build_ext.py
+++ b/lib/python2.7/distutils/command/build_ext.py
@@ -199,10 +199,12 @@
                 else:
                     # win-amd64 or win-ia64
                     suffix = self.plat_name[4:]
-                new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
-                if suffix:
-                    new_lib = os.path.join(new_lib, suffix)
-                self.library_dirs.append(new_lib)
+                # We could have been built in one of two places; add both
+                for d in ('PCbuild',), ('PC', 'VS9.0'):
+                    new_lib = os.path.join(sys.exec_prefix, *d)
+                    if suffix:
+                        new_lib = os.path.join(new_lib, suffix)
+                    self.library_dirs.append(new_lib)
 
             elif MSVC_VERSION == 8:
                 self.library_dirs.append(os.path.join(sys.exec_prefix,
diff --git a/lib/python2.7/distutils/msvc9compiler.py b/lib/python2.7/distutils/msvc9compiler.py
index 7ec9b92..22de4ba 100644
--- a/lib/python2.7/distutils/msvc9compiler.py
+++ b/lib/python2.7/distutils/msvc9compiler.py
@@ -426,7 +426,7 @@
         self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
         if self.__version >= 7:
             self.ldflags_shared_debug = [
-                '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG', '/pdb:None'
+                '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'
                 ]
         self.ldflags_static = [ '/nologo']
 
diff --git a/lib/python2.7/distutils/tests/test_core.py b/lib/python2.7/distutils/tests/test_core.py
index 0d979bc..e6d4c9b 100644
--- a/lib/python2.7/distutils/tests/test_core.py
+++ b/lib/python2.7/distutils/tests/test_core.py
@@ -9,6 +9,7 @@
 from test.test_support import captured_stdout, run_unittest
 import unittest
 from distutils.tests import support
+from distutils import log
 
 # setup script that uses __file__
 setup_using___file__ = """\
@@ -36,6 +37,7 @@
         self.old_stdout = sys.stdout
         self.cleanup_testfn()
         self.old_argv = sys.argv, sys.argv[:]
+        self.addCleanup(log.set_threshold, log._global_log.threshold)
 
     def tearDown(self):
         sys.stdout = self.old_stdout
diff --git a/lib/python2.7/distutils/tests/test_dist.py b/lib/python2.7/distutils/tests/test_dist.py
index eb9b0ef..378fe43 100644
--- a/lib/python2.7/distutils/tests/test_dist.py
+++ b/lib/python2.7/distutils/tests/test_dist.py
@@ -13,6 +13,7 @@
 import distutils.dist
 from test.test_support import TESTFN, captured_stdout, run_unittest, unlink
 from distutils.tests import support
+from distutils import log
 
 
 class test_dist(Command):
@@ -397,6 +398,7 @@
 
     def test_show_help(self):
         # smoke test, just makes sure some help is displayed
+        self.addCleanup(log.set_threshold, log._global_log.threshold)
         dist = Distribution()
         sys.argv = []
         dist.help = 1
diff --git a/lib/python2.7/distutils/unixccompiler.py b/lib/python2.7/distutils/unixccompiler.py
index 2aa1cb1..4c35676 100644
--- a/lib/python2.7/distutils/unixccompiler.py
+++ b/lib/python2.7/distutils/unixccompiler.py
@@ -79,7 +79,9 @@
     static_lib_extension = ".a"
     shared_lib_extension = ".so"
     dylib_lib_extension = ".dylib"
+    xcode_stub_lib_extension = ".tbd"
     static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
+    xcode_stub_lib_format = dylib_lib_format
     if sys.platform == "cygwin":
         exe_extension = ".exe"
 
@@ -245,12 +247,28 @@
     def find_library_file(self, dirs, lib, debug=0):
         shared_f = self.library_filename(lib, lib_type='shared')
         dylib_f = self.library_filename(lib, lib_type='dylib')
+        xcode_stub_f = self.library_filename(lib, lib_type='xcode_stub')
         static_f = self.library_filename(lib, lib_type='static')
 
         if sys.platform == 'darwin':
             # On OSX users can specify an alternate SDK using
             # '-isysroot', calculate the SDK root if it is specified
             # (and use it further on)
+            #
+            # Note that, as of Xcode 7, Apple SDKs may contain textual stub
+            # libraries with .tbd extensions rather than the normal .dylib
+            # shared libraries installed in /.  The Apple compiler tool
+            # chain handles this transparently but it can cause problems
+            # for programs that are being built with an SDK and searching
+            # for specific libraries.  Callers of find_library_file need to
+            # keep in mind that the base filename of the returned SDK library
+            # file might have a different extension from that of the library
+            # file installed on the running system, for example:
+            #   /Applications/Xcode.app/Contents/Developer/Platforms/
+            #       MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/
+            #       usr/lib/libedit.tbd
+            # vs
+            #   /usr/lib/libedit.dylib
             cflags = sysconfig.get_config_var('CFLAGS')
             m = re.search(r'-isysroot\s+(\S+)', cflags)
             if m is None:
@@ -264,6 +282,7 @@
             shared = os.path.join(dir, shared_f)
             dylib = os.path.join(dir, dylib_f)
             static = os.path.join(dir, static_f)
+            xcode_stub = os.path.join(dir, xcode_stub_f)
 
             if sys.platform == 'darwin' and (
                 dir.startswith('/System/') or (
@@ -272,6 +291,7 @@
                 shared = os.path.join(sysroot, dir[1:], shared_f)
                 dylib = os.path.join(sysroot, dir[1:], dylib_f)
                 static = os.path.join(sysroot, dir[1:], static_f)
+                xcode_stub = os.path.join(sysroot, dir[1:], xcode_stub_f)
 
             # We're second-guessing the linker here, with not much hard
             # data to go on: GCC seems to prefer the shared library, so I'm
@@ -279,6 +299,8 @@
             # ignoring even GCC's "-static" option.  So sue me.
             if os.path.exists(dylib):
                 return dylib
+            elif os.path.exists(xcode_stub):
+                return xcode_stub
             elif os.path.exists(shared):
                 return shared
             elif os.path.exists(static):
diff --git a/lib/python2.7/email/test/test_email.py b/lib/python2.7/email/test/test_email.py
index c11e84b..160306c 100644
--- a/lib/python2.7/email/test/test_email.py
+++ b/lib/python2.7/email/test/test_email.py
@@ -12,6 +12,10 @@
 import textwrap
 from cStringIO import StringIO
 from random import choice
+try:
+    from threading import Thread
+except ImportError:
+    from dummy_threading import Thread
 
 import email
 
@@ -33,7 +37,7 @@
 from email import base64MIME
 from email import quopriMIME
 
-from test.test_support import findfile, run_unittest
+from test.test_support import findfile, run_unittest, start_threads
 from email.test import __file__ as landmark
 
 
@@ -2412,6 +2416,25 @@
         addrs = Utils.getaddresses(['User ((nested comment)) <foo@bar.com>'])
         eq(addrs[0][1], 'foo@bar.com')
 
+    def test_make_msgid_collisions(self):
+        # Test make_msgid uniqueness, even with multiple threads
+        class MsgidsThread(Thread):
+            def run(self):
+                # generate msgids for 3 seconds
+                self.msgids = []
+                append = self.msgids.append
+                make_msgid = Utils.make_msgid
+                clock = time.time
+                tfin = clock() + 3.0
+                while clock() < tfin:
+                    append(make_msgid())
+
+        threads = [MsgidsThread() for i in range(5)]
+        with start_threads(threads):
+            pass
+        all_ids = sum([t.msgids for t in threads], [])
+        self.assertEqual(len(set(all_ids)), len(all_ids))
+
     def test_utils_quote_unquote(self):
         eq = self.assertEqual
         msg = Message()
diff --git a/lib/python2.7/email/utils.py b/lib/python2.7/email/utils.py
index c976021..ac13f49 100644
--- a/lib/python2.7/email/utils.py
+++ b/lib/python2.7/email/utils.py
@@ -177,21 +177,20 @@
 def make_msgid(idstring=None):
     """Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
 
-    <20020201195627.33539.96671@nightshade.la.mastaler.com>
+    <142480216486.20800.16526388040877946887@nightshade.la.mastaler.com>
 
     Optional idstring if given is a string used to strengthen the
     uniqueness of the message id.
     """
-    timeval = time.time()
-    utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
+    timeval = int(time.time()*100)
     pid = os.getpid()
-    randint = random.randrange(100000)
+    randint = random.getrandbits(64)
     if idstring is None:
         idstring = ''
     else:
         idstring = '.' + idstring
     idhost = socket.getfqdn()
-    msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
+    msgid = '<%d.%d.%d%s@%s>' % (timeval, pid, randint, idstring, idhost)
     return msgid
 
 
diff --git a/lib/python2.7/encodings/base64_codec.py b/lib/python2.7/encodings/base64_codec.py
index f84e780..34ac555 100644
--- a/lib/python2.7/encodings/base64_codec.py
+++ b/lib/python2.7/encodings/base64_codec.py
@@ -76,4 +76,5 @@
         incrementaldecoder=IncrementalDecoder,
         streamwriter=StreamWriter,
         streamreader=StreamReader,
+        _is_text_encoding=False,
     )
diff --git a/lib/python2.7/encodings/bz2_codec.py b/lib/python2.7/encodings/bz2_codec.py
index 054b36b..136503a 100644
--- a/lib/python2.7/encodings/bz2_codec.py
+++ b/lib/python2.7/encodings/bz2_codec.py
@@ -99,4 +99,5 @@
         incrementaldecoder=IncrementalDecoder,
         streamwriter=StreamWriter,
         streamreader=StreamReader,
+        _is_text_encoding=False,
     )
diff --git a/lib/python2.7/encodings/hex_codec.py b/lib/python2.7/encodings/hex_codec.py
index 91b38d9..154488c 100644
--- a/lib/python2.7/encodings/hex_codec.py
+++ b/lib/python2.7/encodings/hex_codec.py
@@ -76,4 +76,5 @@
         incrementaldecoder=IncrementalDecoder,
         streamwriter=StreamWriter,
         streamreader=StreamReader,
+        _is_text_encoding=False,
     )
diff --git a/lib/python2.7/encodings/quopri_codec.py b/lib/python2.7/encodings/quopri_codec.py
index d8683fd..e4965da 100644
--- a/lib/python2.7/encodings/quopri_codec.py
+++ b/lib/python2.7/encodings/quopri_codec.py
@@ -21,7 +21,7 @@
     # using str() because of cStringIO's Unicode undesired Unicode behavior.
     f = StringIO(str(input))
     g = StringIO()
-    quopri.encode(f, g, 1)
+    quopri.encode(f, g, quotetabs=True)
     output = g.getvalue()
     return (output, len(input))
 
@@ -72,4 +72,5 @@
         incrementaldecoder=IncrementalDecoder,
         streamwriter=StreamWriter,
         streamreader=StreamReader,
+        _is_text_encoding=False,
     )
diff --git a/lib/python2.7/encodings/rot_13.py b/lib/python2.7/encodings/rot_13.py
index 52b6431..4eaf433 100755
--- a/lib/python2.7/encodings/rot_13.py
+++ b/lib/python2.7/encodings/rot_13.py
@@ -44,6 +44,7 @@
         incrementaldecoder=IncrementalDecoder,
         streamwriter=StreamWriter,
         streamreader=StreamReader,
+        _is_text_encoding=False,
     )
 
 ### Decoding Map
diff --git a/lib/python2.7/encodings/uu_codec.py b/lib/python2.7/encodings/uu_codec.py
index 4b137a5..5cb0d2b 100644
--- a/lib/python2.7/encodings/uu_codec.py
+++ b/lib/python2.7/encodings/uu_codec.py
@@ -126,4 +126,5 @@
         incrementaldecoder=IncrementalDecoder,
         streamreader=StreamReader,
         streamwriter=StreamWriter,
+        _is_text_encoding=False,
     )
diff --git a/lib/python2.7/encodings/zlib_codec.py b/lib/python2.7/encodings/zlib_codec.py
index 3419f9f..0c2599d 100644
--- a/lib/python2.7/encodings/zlib_codec.py
+++ b/lib/python2.7/encodings/zlib_codec.py
@@ -99,4 +99,5 @@
         incrementaldecoder=IncrementalDecoder,
         streamreader=StreamReader,
         streamwriter=StreamWriter,
+        _is_text_encoding=False,
     )
diff --git a/lib/python2.7/ensurepip/__init__.py b/lib/python2.7/ensurepip/__init__.py
index e7df79a..48b9fda 100644
--- a/lib/python2.7/ensurepip/__init__.py
+++ b/lib/python2.7/ensurepip/__init__.py
@@ -12,9 +12,9 @@
 __all__ = ["version", "bootstrap"]
 
 
-_SETUPTOOLS_VERSION = "15.2"
+_SETUPTOOLS_VERSION = "20.3"
 
-_PIP_VERSION = "6.1.1"
+_PIP_VERSION = "8.1.1"
 
 # pip currently requires ssl support, so we try to provide a nicer
 # error message when that is missing (http://bugs.python.org/issue19744)
@@ -147,7 +147,7 @@
     _disable_pip_configuration_settings()
 
     # Construct the arguments to be passed to the pip command
-    args = ["uninstall", "-y"]
+    args = ["uninstall", "-y", "--disable-pip-version-check"]
     if verbosity:
         args += ["-" + "v" * verbosity]
 
diff --git a/lib/python2.7/ensurepip/_bundled/pip-6.1.1-py2.py3-none-any.whl b/lib/python2.7/ensurepip/_bundled/pip-6.1.1-py2.py3-none-any.whl
deleted file mode 100644
index e59694a..0000000
--- a/lib/python2.7/ensurepip/_bundled/pip-6.1.1-py2.py3-none-any.whl
+++ /dev/null
Binary files differ
diff --git a/lib/python2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl b/lib/python2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl
new file mode 100644
index 0000000..8632eb7
--- /dev/null
+++ b/lib/python2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl
Binary files differ
diff --git a/lib/python2.7/ensurepip/_bundled/setuptools-15.2-py2.py3-none-any.whl b/lib/python2.7/ensurepip/_bundled/setuptools-20.3-py2.py3-none-any.whl
similarity index 60%
rename from lib/python2.7/ensurepip/_bundled/setuptools-15.2-py2.py3-none-any.whl
rename to lib/python2.7/ensurepip/_bundled/setuptools-20.3-py2.py3-none-any.whl
index f153ed3..39e67c0 100644
--- a/lib/python2.7/ensurepip/_bundled/setuptools-15.2-py2.py3-none-any.whl
+++ b/lib/python2.7/ensurepip/_bundled/setuptools-20.3-py2.py3-none-any.whl
Binary files differ
diff --git a/lib/python2.7/fileinput.py b/lib/python2.7/fileinput.py
index 02dc4c6..b2e2f05 100644
--- a/lib/python2.7/fileinput.py
+++ b/lib/python2.7/fileinput.py
@@ -64,13 +64,6 @@
 disabled when standard input is read.  XXX The current implementation
 does not work for MS-DOS 8+3 filesystems.
 
-Performance: this module is unfortunately one of the slower ways of
-processing large numbers of input lines.  Nevertheless, a significant
-speed-up has been obtained by using readlines(bufsize) instead of
-readline().  A new keyword argument, bufsize=N, is present on the
-input() function and the FileInput() class to override the default
-buffer size.
-
 XXX Possible additions:
 
 - optional getopt argument processing
@@ -86,6 +79,7 @@
 
 _state = None
 
+# No longer used
 DEFAULT_BUFSIZE = 8*1024
 
 def input(files=None, inplace=0, backup="", bufsize=0,
@@ -207,17 +201,14 @@
         self._files = files
         self._inplace = inplace
         self._backup = backup
-        self._bufsize = bufsize or DEFAULT_BUFSIZE
         self._savestdout = None
         self._output = None
         self._filename = None
-        self._lineno = 0
+        self._startlineno = 0
         self._filelineno = 0
         self._file = None
         self._isstdin = False
         self._backupfilename = None
-        self._buffer = []
-        self._bufindex = 0
         # restrict mode argument to reading modes
         if mode not in ('r', 'rU', 'U', 'rb'):
             raise ValueError("FileInput opening mode must be one of "
@@ -242,22 +233,18 @@
         return self
 
     def next(self):
-        try:
-            line = self._buffer[self._bufindex]
-        except IndexError:
-            pass
-        else:
-            self._bufindex += 1
-            self._lineno += 1
-            self._filelineno += 1
-            return line
-        line = self.readline()
-        if not line:
-            raise StopIteration
-        return line
+        while 1:
+            line = self._readline()
+            if line:
+                self._filelineno += 1
+                return line
+            if not self._file:
+                raise StopIteration
+            self.nextfile()
+            # repeat with next file
 
     def __getitem__(self, i):
-        if i != self._lineno:
+        if i != self.lineno():
             raise RuntimeError, "accessing lines out of order"
         try:
             return self.next()
@@ -277,7 +264,11 @@
                 output.close()
         finally:
             file = self._file
-            self._file = 0
+            self._file = None
+            try:
+                del self._readline  # restore FileInput._readline
+            except AttributeError:
+                pass
             try:
                 if file and not self._isstdin:
                     file.close()
@@ -289,75 +280,72 @@
                     except OSError: pass
 
                 self._isstdin = False
-                self._buffer = []
-                self._bufindex = 0
 
     def readline(self):
-        try:
-            line = self._buffer[self._bufindex]
-        except IndexError:
-            pass
-        else:
-            self._bufindex += 1
-            self._lineno += 1
-            self._filelineno += 1
-            return line
-        if not self._file:
-            if not self._files:
-                return ""
-            self._filename = self._files[0]
-            self._files = self._files[1:]
-            self._filelineno = 0
-            self._file = None
-            self._isstdin = False
-            self._backupfilename = 0
-            if self._filename == '-':
-                self._filename = '<stdin>'
-                self._file = sys.stdin
-                self._isstdin = True
-            else:
-                if self._inplace:
-                    self._backupfilename = (
-                        self._filename + (self._backup or os.extsep+"bak"))
-                    try: os.unlink(self._backupfilename)
-                    except os.error: pass
-                    # The next few lines may raise IOError
-                    os.rename(self._filename, self._backupfilename)
-                    self._file = open(self._backupfilename, self._mode)
-                    try:
-                        perm = os.fstat(self._file.fileno()).st_mode
-                    except OSError:
-                        self._output = open(self._filename, "w")
-                    else:
-                        fd = os.open(self._filename,
-                                     os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
-                                     perm)
-                        self._output = os.fdopen(fd, "w")
-                        try:
-                            if hasattr(os, 'chmod'):
-                                os.chmod(self._filename, perm)
-                        except OSError:
-                            pass
-                    self._savestdout = sys.stdout
-                    sys.stdout = self._output
-                else:
-                    # This may raise IOError
-                    if self._openhook:
-                        self._file = self._openhook(self._filename, self._mode)
-                    else:
-                        self._file = open(self._filename, self._mode)
-        self._buffer = self._file.readlines(self._bufsize)
-        self._bufindex = 0
-        if not self._buffer:
+        while 1:
+            line = self._readline()
+            if line:
+                self._filelineno += 1
+                return line
+            if not self._file:
+                return line
             self.nextfile()
-        # Recursive call
-        return self.readline()
+            # repeat with next file
+
+    def _readline(self):
+        if not self._files:
+            return ""
+        self._filename = self._files[0]
+        self._files = self._files[1:]
+        self._startlineno = self.lineno()
+        self._filelineno = 0
+        self._file = None
+        self._isstdin = False
+        self._backupfilename = 0
+        if self._filename == '-':
+            self._filename = '<stdin>'
+            self._file = sys.stdin
+            self._isstdin = True
+        else:
+            if self._inplace:
+                self._backupfilename = (
+                    self._filename + (self._backup or os.extsep+"bak"))
+                try: os.unlink(self._backupfilename)
+                except os.error: pass
+                # The next few lines may raise IOError
+                os.rename(self._filename, self._backupfilename)
+                self._file = open(self._backupfilename, self._mode)
+                try:
+                    perm = os.fstat(self._file.fileno()).st_mode
+                except OSError:
+                    self._output = open(self._filename, "w")
+                else:
+                    fd = os.open(self._filename,
+                                    os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
+                                    perm)
+                    self._output = os.fdopen(fd, "w")
+                    try:
+                        if hasattr(os, 'chmod'):
+                            os.chmod(self._filename, perm)
+                    except OSError:
+                        pass
+                self._savestdout = sys.stdout
+                sys.stdout = self._output
+            else:
+                # This may raise IOError
+                if self._openhook:
+                    self._file = self._openhook(self._filename, self._mode)
+                else:
+                    self._file = open(self._filename, self._mode)
+
+        self._readline = self._file.readline  # hide FileInput._readline
+        return self._readline()
 
     def filename(self):
         return self._filename
 
     def lineno(self):
-        return self._lineno
+        return self._startlineno + self._filelineno
 
     def filelineno(self):
         return self._filelineno
diff --git a/lib/python2.7/hashlib.py b/lib/python2.7/hashlib.py
index bbd06b9..5f22631 100644
--- a/lib/python2.7/hashlib.py
+++ b/lib/python2.7/hashlib.py
@@ -146,6 +146,7 @@
     try:
         globals()[__func_name] = __get_hash(__func_name)
     except ValueError:
+        continue
         import logging
         logging.exception('code for hash %s was not found.', __func_name)
 
diff --git a/lib/python2.7/httplib.py b/lib/python2.7/httplib.py
index 9f1e088..7223ba1 100644
--- a/lib/python2.7/httplib.py
+++ b/lib/python2.7/httplib.py
@@ -772,8 +772,7 @@
         if self.sock:
             raise RuntimeError("Can't setup tunnel for established connection.")
 
-        self._tunnel_host = host
-        self._tunnel_port = port
+        self._tunnel_host, self._tunnel_port = self._get_hostport(host, port)
         if headers:
             self._tunnel_headers = headers
         else:
@@ -802,8 +801,8 @@
         self.debuglevel = level
 
     def _tunnel(self):
-        (host, port) = self._get_hostport(self._tunnel_host, self._tunnel_port)
-        self.send("CONNECT %s:%d HTTP/1.0\r\n" % (host, port))
+        self.send("CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host,
+            self._tunnel_port))
         for header, value in self._tunnel_headers.iteritems():
             self.send("%s: %s\r\n" % (header, value))
         self.send("\r\n")
@@ -811,6 +810,11 @@
                                        method = self._method)
         (version, code, message) = response._read_status()
 
+        if version == "HTTP/0.9":
+            # HTTP/0.9 doesn't support the CONNECT verb, so if httplib has
+            # concluded HTTP/0.9 is being used something has gone wrong.
+            self.close()
+            raise socket.error("Invalid response from tunnel request")
         if code != 200:
             self.close()
             raise socket.error("Tunnel connection failed: %d %s" % (code,
@@ -1063,7 +1067,7 @@
         elif body is not None:
             try:
                 thelen = str(len(body))
-            except TypeError:
+            except (TypeError, AttributeError):
                 # If this is a file-like object, try to
                 # fstat its file descriptor
                 try:
diff --git a/lib/python2.7/idlelib/AutoCompleteWindow.py b/lib/python2.7/idlelib/AutoCompleteWindow.py
index 27b0e56..205a29b 100644
--- a/lib/python2.7/idlelib/AutoCompleteWindow.py
+++ b/lib/python2.7/idlelib/AutoCompleteWindow.py
@@ -192,6 +192,7 @@
         scrollbar.config(command=listbox.yview)
         scrollbar.pack(side=RIGHT, fill=Y)
         listbox.pack(side=LEFT, fill=BOTH, expand=True)
+        acw.lift()  # work around bug in Tk 8.5.18+ (issue #24570)
 
         # Initialize the listbox selection
         self.listbox.select_set(self._binary_search(self.start))
diff --git a/lib/python2.7/idlelib/Bindings.py b/lib/python2.7/idlelib/Bindings.py
index 2072c7e..862e6ab 100644
--- a/lib/python2.7/idlelib/Bindings.py
+++ b/lib/python2.7/idlelib/Bindings.py
@@ -76,7 +76,6 @@
    ]),
  ('options', [
    ('Configure _IDLE', '<<open-config-dialog>>'),
-   ('Configure _Extensions', '<<open-config-extensions-dialog>>'),
    None,
    ]),
  ('help', [
diff --git a/lib/python2.7/idlelib/CREDITS.txt b/lib/python2.7/idlelib/CREDITS.txt
index 5ff599d..3a50eb8 100644
--- a/lib/python2.7/idlelib/CREDITS.txt
+++ b/lib/python2.7/idlelib/CREDITS.txt
@@ -24,7 +24,7 @@
 integration, debugger integration and persistent breakpoints).
 
 Scott David Daniels, Tal Einat, Hernan Foffani, Christos Georgiou,
-Jim Jewett, Martin v. Löwis, Jason Orendorff, Guilherme Polo, Josh Robb,
+Jim Jewett, Martin v. Löwis, Jason Orendorff, Guilherme Polo, Josh Robb,
 Nigel Rowe, Bruce Sherwood, Jeff Shute, and Weeble have submitted useful
 patches.  Thanks, guys!
 
diff --git a/lib/python2.7/idlelib/CallTipWindow.py b/lib/python2.7/idlelib/CallTipWindow.py
index 77384a0..90c2d7c 100644
--- a/lib/python2.7/idlelib/CallTipWindow.py
+++ b/lib/python2.7/idlelib/CallTipWindow.py
@@ -72,6 +72,7 @@
                            background="#ffffe0", relief=SOLID, borderwidth=1,
                            font = self.widget['font'])
         self.label.pack()
+        tw.lift()  # work around bug in Tk 8.5.18+ (issue #24570)
 
         self.checkhideid = self.widget.bind(CHECKHIDE_VIRTUAL_EVENT_NAME,
                                             self.checkhide_event)
diff --git a/lib/python2.7/idlelib/ClassBrowser.py b/lib/python2.7/idlelib/ClassBrowser.py
index 5be65ef..d09c52f 100644
--- a/lib/python2.7/idlelib/ClassBrowser.py
+++ b/lib/python2.7/idlelib/ClassBrowser.py
@@ -56,7 +56,7 @@
         self.settitle()
         top.focus_set()
         # create scrolled canvas
-        theme = idleConf.GetOption('main','Theme','name')
+        theme = idleConf.CurrentTheme()
         background = idleConf.GetHighlight(theme, 'normal')['background']
         sc = ScrolledCanvas(top, bg=background, highlightthickness=0, takefocus=1)
         sc.frame.pack(expand=1, fill="both")
diff --git a/lib/python2.7/idlelib/ColorDelegator.py b/lib/python2.7/idlelib/ColorDelegator.py
index 96c1fab..fec2670 100644
--- a/lib/python2.7/idlelib/ColorDelegator.py
+++ b/lib/python2.7/idlelib/ColorDelegator.py
@@ -62,7 +62,7 @@
         self.tag_raise('sel')
 
     def LoadTagDefs(self):
-        theme = idleConf.GetOption('main','Theme','name')
+        theme = idleConf.CurrentTheme()
         self.tagdefs = {
             "COMMENT": idleConf.GetHighlight(theme, "comment"),
             "KEYWORD": idleConf.GetHighlight(theme, "keyword"),
diff --git a/lib/python2.7/idlelib/Debugger.py b/lib/python2.7/idlelib/Debugger.py
index 71045dd..7fbe4a7 100644
--- a/lib/python2.7/idlelib/Debugger.py
+++ b/lib/python2.7/idlelib/Debugger.py
@@ -17,7 +17,10 @@
             self.set_step()
             return
         message = self.__frame2message(frame)
-        self.gui.interaction(message, frame)
+        try:
+            self.gui.interaction(message, frame)
+        except TclError:  # When closing debugger window with [x] in 3.x
+            pass
 
     def user_exception(self, frame, info):
         if self.in_rpc_code(frame):
@@ -59,8 +62,42 @@
         self.frame = None
         self.make_gui()
         self.interacting = 0
+        self.nesting_level = 0
 
     def run(self, *args):
+        # Deal with the scenario where we've already got a program running
+        # in the debugger and we want to start another. If that is the case,
+        # our second 'run' was invoked from an event dispatched not from
+        # the main event loop, but from the nested event loop in 'interaction'
+        # below. So our stack looks something like this:
+        #       outer main event loop
+        #         run()
+        #           <running program with traces>
+        #             callback to debugger's interaction()
+        #               nested event loop
+        #                 run() for second command
+        #
+        # This kind of nesting of event loops causes all kinds of problems
+        # (see e.g. issue #24455) especially when dealing with running as a
+        # subprocess, where there's all kinds of extra stuff happening in
+        # there - insert a traceback.print_stack() to check it out.
+        #
+        # By this point, we've already called restart_subprocess() in
+        # ScriptBinding. However, we also need to unwind the stack back to
+        # that outer event loop.  To accomplish this, we:
+        #   - return immediately from the nested run()
+        #   - abort_loop ensures the nested event loop will terminate
+        #   - the debugger's interaction routine completes normally
+        #   - the restart_subprocess() will have taken care of stopping
+        #     the running program, which will also let the outer run complete
+        #
+        # That leaves us back at the outer main event loop, at which point our
+        # after event can fire, and we'll come back to this routine with a
+        # clean stack.
+        if self.nesting_level > 0:
+            self.abort_loop()
+            self.root.after(100, lambda: self.run(*args))
+            return
         try:
             self.interacting = 1
             return self.idb.run(*args)
@@ -68,6 +105,10 @@
             self.interacting = 0
 
     def close(self, event=None):
+        try:
+            self.quit()
+        except Exception:
+            pass
         if self.interacting:
             self.top.bell()
             return
@@ -191,7 +232,12 @@
             b.configure(state="normal")
         #
         self.top.wakeup()
-        self.root.mainloop()
+        # Nested main loop: Tkinter's main loop is not reentrant, so use
+        # Tcl's vwait facility, which reenters the event loop until an
+        # event handler sets the variable we're waiting on
+        self.nesting_level += 1
+        self.root.tk.call('vwait', '::idledebugwait')
+        self.nesting_level -= 1
         #
         for b in self.buttons:
             b.configure(state="disabled")
@@ -215,23 +261,26 @@
 
     def cont(self):
         self.idb.set_continue()
-        self.root.quit()
+        self.abort_loop()
 
     def step(self):
         self.idb.set_step()
-        self.root.quit()
+        self.abort_loop()
 
     def next(self):
         self.idb.set_next(self.frame)
-        self.root.quit()
+        self.abort_loop()
 
     def ret(self):
         self.idb.set_return(self.frame)
-        self.root.quit()
+        self.abort_loop()
 
     def quit(self):
         self.idb.set_quit()
-        self.root.quit()
+        self.abort_loop()
+
+    def abort_loop(self):
+        self.root.tk.call('set', '::idledebugwait', '1')
 
     stackviewer = None
 
diff --git a/lib/python2.7/idlelib/EditorWindow.py b/lib/python2.7/idlelib/EditorWindow.py
index d34fc62..0c5b713 100644
--- a/lib/python2.7/idlelib/EditorWindow.py
+++ b/lib/python2.7/idlelib/EditorWindow.py
@@ -9,7 +9,6 @@
 import webbrowser
 
 from idlelib.MultiCall import MultiCallCreator
-from idlelib import idlever
 from idlelib import WindowList
 from idlelib import SearchDialog
 from idlelib import GrepDialog
@@ -18,6 +17,7 @@
 from idlelib.configHandler import idleConf
 from idlelib import aboutDialog, textView, configDialog
 from idlelib import macosxSupport
+from idlelib import help
 
 # The default tab setting for a Text widget, in average-width characters.
 TK_TABWIDTH_DEFAULT = 8
@@ -83,6 +83,11 @@
             near - a Toplevel widget (e.g. EditorWindow or PyShell)
                    to use as a reference for placing the help window
         """
+        import warnings as w
+        w.warn("EditorWindow.HelpDialog is no longer used by Idle.\n"
+               "It will be removed in 3.6 or later.\n"
+               "It has been replaced by private help.HelpWindow\n",
+               DeprecationWarning, stacklevel=2)
         if self.dlg is None:
             self.show_dialog(parent)
         if near:
@@ -109,9 +114,7 @@
         self.dlg = None
         self.parent = None
 
-helpDialog = HelpDialog()  # singleton instance
-def _help_dialog(parent):  # wrapper for htest
-    helpDialog.show_dialog(parent)
+helpDialog = HelpDialog()  # singleton instance, no longer used
 
 
 class EditorWindow(object):
@@ -154,7 +157,6 @@
                     EditorWindow.help_url = 'file://' + EditorWindow.help_url
             else:
                 EditorWindow.help_url = "https://docs.python.org/%d.%d/" % sys.version_info[:2]
-        currentTheme=idleConf.CurrentTheme()
         self.flist = flist
         root = root or flist.root
         self.root = root
@@ -182,6 +184,7 @@
                 'name': 'text',
                 'padx': 5,
                 'wrap': 'none',
+                'highlightthickness': 0,
                 'width': self.width,
                 'height': idleConf.GetOption('main', 'EditorWindow', 'height', type='int')}
         if TkVersion >= 8.5:
@@ -200,13 +203,13 @@
         if macosxSupport.isAquaTk():
             # Command-W on editorwindows doesn't work without this.
             text.bind('<<close-window>>', self.close_event)
-            # Some OS X systems have only one mouse button,
-            # so use control-click for pulldown menus there.
-            #  (Note, AquaTk defines <2> as the right button if
-            #   present and the Tk Text widget already binds <2>.)
+            # Some OS X systems have only one mouse button, so use
+            # control-click for popup context menus there. For two
+            # buttons, AquaTk defines <2> as the right button, not <3>.
             text.bind("<Control-Button-1>",self.right_menu_event)
+            text.bind("<2>", self.right_menu_event)
         else:
-            # Elsewhere, use right-click for pulldown menus.
+            # Elsewhere, use right-click for popup menus.
             text.bind("<3>",self.right_menu_event)
         text.bind("<<cut>>", self.cut)
         text.bind("<<copy>>", self.copy)
@@ -216,8 +219,6 @@
         text.bind("<<python-docs>>", self.python_docs)
         text.bind("<<about-idle>>", self.about_dialog)
         text.bind("<<open-config-dialog>>", self.config_dialog)
-        text.bind("<<open-config-extensions-dialog>>",
-                  self.config_extensions_dialog)
         text.bind("<<open-module>>", self.open_module)
         text.bind("<<do-nothing>>", lambda event: "break")
         text.bind("<<select-all>>", self.select_all)
@@ -258,13 +259,7 @@
         vbar['command'] = text.yview
         vbar.pack(side=RIGHT, fill=Y)
         text['yscrollcommand'] = vbar.set
-        fontWeight = 'normal'
-        if idleConf.GetOption('main', 'EditorWindow', 'font-bold', type='bool'):
-            fontWeight='bold'
-        text.config(font=(idleConf.GetOption('main', 'EditorWindow', 'font'),
-                          idleConf.GetOption('main', 'EditorWindow',
-                                             'font-size', type='int'),
-                          fontWeight))
+        text['font'] = idleConf.GetFont(self.root, 'main', 'EditorWindow')
         text_frame.pack(side=LEFT, fill=BOTH, expand=1)
         text.pack(side=TOP, fill=BOTH, expand=1)
         text.focus_set()
@@ -318,7 +313,7 @@
         io.set_filename_change_hook(self.filename_change_hook)
 
         # Create the recent files submenu
-        self.recent_files_menu = Menu(self.menubar)
+        self.recent_files_menu = Menu(self.menubar, tearoff=0)
         self.menudict['file'].insert_cascade(3, label='Recent Files',
                                              underline=0,
                                              menu=self.recent_files_menu)
@@ -353,36 +348,6 @@
         self.askinteger = tkSimpleDialog.askinteger
         self.showerror = tkMessageBox.showerror
 
-        self._highlight_workaround()  # Fix selection tags on Windows
-
-    def _highlight_workaround(self):
-        # On Windows, Tk removes painting of the selection
-        # tags which is different behavior than on Linux and Mac.
-        # See issue14146 for more information.
-        if not sys.platform.startswith('win'):
-            return
-
-        text = self.text
-        text.event_add("<<Highlight-FocusOut>>", "<FocusOut>")
-        text.event_add("<<Highlight-FocusIn>>", "<FocusIn>")
-        def highlight_fix(focus):
-            sel_range = text.tag_ranges("sel")
-            if sel_range:
-                if focus == 'out':
-                    HILITE_CONFIG = idleConf.GetHighlight(
-                            idleConf.CurrentTheme(), 'hilite')
-                    text.tag_config("sel_fix", HILITE_CONFIG)
-                    text.tag_raise("sel_fix")
-                    text.tag_add("sel_fix", *sel_range)
-                elif focus == 'in':
-                    text.tag_remove("sel_fix", "1.0", "end")
-
-        text.bind("<<Highlight-FocusOut>>",
-                lambda ev: highlight_fix("out"))
-        text.bind("<<Highlight-FocusIn>>",
-                lambda ev: highlight_fix("in"))
-
-
     def _filename_to_unicode(self, filename):
         """convert filename to unicode in order to display it in Tk"""
         if isinstance(filename, unicode) or not filename:
@@ -446,6 +411,7 @@
 
     def set_status_bar(self):
         self.status_bar = self.MultiStatusBar(self.top)
+        sep = Frame(self.top, height=1, borderwidth=1, background='grey75')
         if sys.platform == "darwin":
             # Insert some padding to avoid obscuring some of the statusbar
             # by the resize widget.
@@ -453,6 +419,7 @@
         self.status_bar.set_label('column', 'Col: ?', side=RIGHT)
         self.status_bar.set_label('line', 'Ln: ?', side=RIGHT)
         self.status_bar.pack(side=BOTTOM, fill=X)
+        sep.pack(side=BOTTOM, fill=X)
         self.text.bind("<<set-line-and-column>>", self.set_line_and_column)
         self.text.event_add("<<set-line-and-column>>",
                             "<KeyRelease>", "<ButtonRelease>")
@@ -479,12 +446,13 @@
         self.menudict = menudict = {}
         for name, label in self.menu_specs:
             underline, label = prepstr(label)
-            menudict[name] = menu = Menu(mbar, name=name)
+            menudict[name] = menu = Menu(mbar, name=name, tearoff=0)
             mbar.add_cascade(label=label, menu=menu, underline=underline)
 
         if macosxSupport.isCarbonTk():
             # Insert the application menu
-            menudict['application'] = menu = Menu(mbar, name='apple')
+            menudict['application'] = menu = Menu(mbar, name='apple',
+                                                tearoff=0)
             mbar.add_cascade(label='IDLE', menu=menu)
 
         self.fill_menus()
@@ -565,19 +533,23 @@
             return 'normal'
 
     def about_dialog(self, event=None):
+        "Handle Help 'About IDLE' event."
+        # Synchronize with macosxSupport.overrideRootMenu.about_dialog.
         aboutDialog.AboutDialog(self.top,'About IDLE')
 
     def config_dialog(self, event=None):
+        "Handle Options 'Configure IDLE' event."
+        # Synchronize with macosxSupport.overrideRootMenu.config_dialog.
         configDialog.ConfigDialog(self.top,'Settings')
-    def config_extensions_dialog(self, event=None):
-        configDialog.ConfigExtensionsDialog(self.top)
 
     def help_dialog(self, event=None):
+        "Handle Help 'IDLE Help' event."
+        # Synchronize with macosxSupport.overrideRootMenu.help_dialog.
         if self.root:
             parent = self.root
         else:
             parent = self.top
-        helpDialog.display(parent, near=self.top)
+        help.show_idlehelp(parent)
 
     def python_docs(self, event=None):
         if sys.platform[:3] == 'win':
@@ -785,7 +757,7 @@
         # Called from self.filename_change_hook and from configDialog.py
         self._rmcolorizer()
         self._addcolorizer()
-        theme = idleConf.GetOption('main','Theme','name')
+        theme = idleConf.CurrentTheme()
         normal_colors = idleConf.GetHighlight(theme, 'normal')
         cursor_color = idleConf.GetHighlight(theme, 'cursor', fgBg='fg')
         select_colors = idleConf.GetHighlight(theme, 'hilite')
@@ -796,17 +768,15 @@
             selectforeground=select_colors['foreground'],
             selectbackground=select_colors['background'],
             )
+        if TkVersion >= 8.5:
+            self.text.config(
+                inactiveselectbackground=select_colors['background'])
 
     def ResetFont(self):
         "Update the text widgets' font if it is changed"
         # Called from configDialog.py
-        fontWeight='normal'
-        if idleConf.GetOption('main','EditorWindow','font-bold',type='bool'):
-            fontWeight='bold'
-        self.text.config(font=(idleConf.GetOption('main','EditorWindow','font'),
-                idleConf.GetOption('main','EditorWindow','font-size',
-                                   type='int'),
-                fontWeight))
+
+        self.text['font'] = idleConf.GetFont(self.root, 'main','EditorWindow')
 
     def RemoveKeybindings(self):
         "Remove the keybindings before they are changed."
@@ -920,8 +890,10 @@
         except IOError as err:
             if not getattr(self.root, "recentfilelist_error_displayed", False):
                 self.root.recentfilelist_error_displayed = True
-                tkMessageBox.showerror(title='IDLE Error',
-                    message='Unable to update Recent Files list:\n%s'
+                tkMessageBox.showwarning(title='IDLE Warning',
+                    message="Cannot update File menu Recent Files list. "
+                            "Your operating system says:\n%s\n"
+                            "Select OK and IDLE will continue without updating."
                         % str(err),
                     parent=self.text)
         # for each edit window instance, construct the recent files menu
@@ -1729,4 +1701,4 @@
 
 if __name__ == '__main__':
     from idlelib.idle_test.htest import run
-    run(_help_dialog, _editor_window)
+    run(_editor_window)
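
Illustrative sketch (not part of the patch): the EditorWindow hunks above replace idleConf.GetOption('main','Theme','name') with idleConf.CurrentTheme() and, on Tk 8.5+, also set inactiveselectbackground so the selection stays visible when the window loses focus. A minimal standalone version of that pattern, assuming a Python 2.7 install whose idlelib is importable (apply_selection_colors is a made-up helper name):

from Tkinter import Tk, Text, TkVersion
from idlelib.configHandler import idleConf

def apply_selection_colors(text):
    # CurrentTheme() resolves the active theme name, replacing the old
    # GetOption('main', 'Theme', 'name') lookup used before this patch.
    theme = idleConf.CurrentTheme()
    select_colors = idleConf.GetHighlight(theme, 'hilite')
    text.config(selectforeground=select_colors['foreground'],
                selectbackground=select_colors['background'])
    if TkVersion >= 8.5:
        # Keep the selection visible when the text widget loses focus.
        text.config(inactiveselectbackground=select_colors['background'])

if __name__ == '__main__':
    root = Tk()
    text_widget = Text(root)
    text_widget.pack()
    apply_selection_colors(text_widget)
    root.mainloop()
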
diff --git a/lib/python2.7/idlelib/GrepDialog.py b/lib/python2.7/idlelib/GrepDialog.py
index afb9a21..d86d50d 100644
--- a/lib/python2.7/idlelib/GrepDialog.py
+++ b/lib/python2.7/idlelib/GrepDialog.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import os
 import fnmatch
 import re  # for htest
@@ -5,7 +6,6 @@
 from Tkinter import StringVar, BooleanVar, Checkbutton  # for GrepDialog
 from Tkinter import Tk, Text, Button, SEL, END  # for htest
 from idlelib import SearchEngine
-import itertools
 from idlelib.SearchDialogBase import SearchDialogBase
 # Importing OutputWindow fails due to import loop
 # EditorWindow -> GrepDialop -> OutputWindow -> EditorWindow
diff --git a/lib/python2.7/idlelib/IOBinding.py b/lib/python2.7/idlelib/IOBinding.py
index e7d747c..ef40715 100644
--- a/lib/python2.7/idlelib/IOBinding.py
+++ b/lib/python2.7/idlelib/IOBinding.py
@@ -5,22 +5,20 @@
 #     end-of-line conventions, instead of relying on the standard library,
 #     which will only understand the local convention.
 
-import os
-import types
-import pipes
-import sys
 import codecs
+from codecs import BOM_UTF8
+import os
+import pipes
+import re
+import sys
 import tempfile
+
 import tkFileDialog
 import tkMessageBox
-import re
-from Tkinter import *
 from SimpleDialog import SimpleDialog
 
 from idlelib.configHandler import idleConf
 
-from codecs import BOM_UTF8
-
 # Try setting the locale, so that we can find out
 # what encoding to use
 try:
@@ -141,7 +139,6 @@
         raise LookupError, "Unknown encoding "+name
     return name
 
-
 class IOBinding:
 
     def __init__(self, editwin):
@@ -251,7 +248,7 @@
             with open(filename, 'rb') as f:
                 chars = f.read()
         except IOError as msg:
-            tkMessageBox.showerror("I/O Error", str(msg), master=self.text)
+            tkMessageBox.showerror("I/O Error", str(msg), parent=self.text)
             return False
 
         chars = self.decode(chars)
@@ -298,7 +295,7 @@
                 title="Error loading the file",
                 message="The encoding '%s' is not known to this Python "\
                 "installation. The file may not display correctly" % name,
-                master = self.text)
+                parent = self.text)
             enc = None
         if enc:
             try:
@@ -328,7 +325,7 @@
                   title="Save On Close",
                   message=message,
                   default=tkMessageBox.YES,
-                  master=self.text)
+                  parent=self.text)
         if confirm:
             reply = "yes"
             self.save(None)
@@ -387,11 +384,11 @@
             return True
         except IOError as msg:
             tkMessageBox.showerror("I/O Error", str(msg),
-                                   master=self.text)
+                                   parent=self.text)
             return False
 
     def encode(self, chars):
-        if isinstance(chars, types.StringType):
+        if isinstance(chars, str):
             # This is either plain ASCII, or Tk was returning mixed-encoding
             # text to us. Don't try to guess further.
             return chars
@@ -417,7 +414,7 @@
             tkMessageBox.showerror(
                 "I/O Error",
                 "%s. Saving as UTF-8" % failed,
-                master = self.text)
+                parent = self.text)
         # If there was a UTF-8 signature, use that. This should not fail
         if self.fileencoding == BOM_UTF8 or failed:
             return BOM_UTF8 + chars.encode("utf-8")
@@ -430,7 +427,7 @@
                     "I/O Error",
                     "Cannot save this as '%s' anymore. Saving as UTF-8" \
                     % self.fileencoding,
-                    master = self.text)
+                    parent = self.text)
                 return BOM_UTF8 + chars.encode("utf-8")
         # Nothing was declared, and we had not determined an encoding
         # on loading. Recommend an encoding line.
@@ -474,7 +471,7 @@
                   title="Print",
                   message="Print to Default Printer",
                   default=tkMessageBox.OK,
-                  master=self.text)
+                  parent=self.text)
         if not confirm:
             self.text.focus_set()
             return "break"
@@ -511,10 +508,10 @@
                          status + output
             if output:
                 output = "Printing command: %s\n" % repr(command) + output
-                tkMessageBox.showerror("Print status", output, master=self.text)
+                tkMessageBox.showerror("Print status", output, parent=self.text)
         else:  #no printing for this platform
             message = "Printing is not enabled for this platform: %s" % platform
-            tkMessageBox.showinfo("Print status", message, master=self.text)
+            tkMessageBox.showinfo("Print status", message, parent=self.text)
         if tempfilename:
             os.unlink(tempfilename)
         return "break"
@@ -533,7 +530,7 @@
     def askopenfile(self):
         dir, base = self.defaultfilename("open")
         if not self.opendialog:
-            self.opendialog = tkFileDialog.Open(master=self.text,
+            self.opendialog = tkFileDialog.Open(parent=self.text,
                                                 filetypes=self.filetypes)
         filename = self.opendialog.show(initialdir=dir, initialfile=base)
         if isinstance(filename, unicode):
@@ -556,7 +553,7 @@
         dir, base = self.defaultfilename("save")
         if not self.savedialog:
             self.savedialog = tkFileDialog.SaveAs(
-                    master=self.text,
+                    parent=self.text,
                     filetypes=self.filetypes,
                     defaultextension=self.defaultextension)
         filename = self.savedialog.show(initialdir=dir, initialfile=base)
@@ -568,8 +565,11 @@
         "Update recent file list on all editor windows"
         self.editwin.update_recent_files_list(filename)
 
-def _io_binding(parent):
-    root = Tk()
+
+def _io_binding(parent):  # htest #
+    from Tkinter import Toplevel, Text
+
+    root = Toplevel(parent)
     root.title("Test IOBinding")
     width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
     root.geometry("+%d+%d"%(x, y + 150))
@@ -578,20 +578,30 @@
             self.text = text
             self.flist = None
             self.text.bind("<Control-o>", self.open)
+            self.text.bind('<Control-p>', self.printer)
             self.text.bind("<Control-s>", self.save)
+            self.text.bind("<Alt-s>", self.saveas)
+            self.text.bind('<Control-c>', self.savecopy)
         def get_saved(self): return 0
         def set_saved(self, flag): pass
         def reset_undo(self): pass
+        def update_recent_files_list(self, filename): pass
         def open(self, event):
             self.text.event_generate("<<open-window-from-file>>")
+        def printer(self, event):
+            self.text.event_generate("<<print-window>>")
         def save(self, event):
             self.text.event_generate("<<save-window>>")
+        def saveas(self, event):
+            self.text.event_generate("<<save-window-as-file>>")
+        def savecopy(self, event):
+            self.text.event_generate("<<save-copy-of-window-as-file>>")
 
     text = Text(root)
     text.pack()
     text.focus_set()
     editwin = MyEditWin(text)
-    io = IOBinding(editwin)
+    IOBinding(editwin)
 
 if __name__ == "__main__":
     from idlelib.idle_test.htest import run
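
Illustrative sketch (not part of the patch): the IOBinding hunks above switch every tkMessageBox/tkFileDialog call from master= to parent=, which ties the dialog to the widget that raised it (and lets it appear as a sheet on OS X, per the NEWS entry for issue #25173). A minimal example of the new call form:

from Tkinter import Tk, Text
import tkMessageBox

root = Tk()
text = Text(root)
text.pack()
# Old form: tkMessageBox.showerror("I/O Error", "details", master=text)
# New form: the dialog is attached to the toplevel containing the widget.
tkMessageBox.showerror("I/O Error", "details", parent=text)
root.mainloop()
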
diff --git a/lib/python2.7/idlelib/MultiStatusBar.py b/lib/python2.7/idlelib/MultiStatusBar.py
index df136b8..e3d59ee 100644
--- a/lib/python2.7/idlelib/MultiStatusBar.py
+++ b/lib/python2.7/idlelib/MultiStatusBar.py
@@ -8,13 +8,15 @@
         Frame.__init__(self, master, **kw)
         self.labels = {}
 
-    def set_label(self, name, text='', side=LEFT):
+    def set_label(self, name, text='', side=LEFT, width=0):
         if name not in self.labels:
-            label = Label(self, bd=1, relief=SUNKEN, anchor=W)
-            label.pack(side=side)
+            label = Label(self, borderwidth=0, anchor=W)
+            label.pack(side=side, pady=0, padx=4)
             self.labels[name] = label
         else:
             label = self.labels[name]
+        if width != 0:
+            label.config(width=width)
         label.config(text=text)
 
 def _multistatus_bar(parent):
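
Illustrative sketch (not part of the patch): the MultiStatusBar hunk above flattens the status-bar labels (no border, a little horizontal padding) and adds an optional width= argument, matching the NEWS entry for issue #24750. A minimal usage sketch, assuming idlelib is importable; the width value 10 is arbitrary:

from Tkinter import Tk, RIGHT, BOTTOM, X
from idlelib.MultiStatusBar import MultiStatusBar

root = Tk()
bar = MultiStatusBar(root)
# The new optional width= keeps a label from resizing as its text changes.
bar.set_label('column', 'Col: 0', side=RIGHT, width=10)
bar.set_label('line', 'Ln: 1', side=RIGHT, width=10)
bar.pack(side=BOTTOM, fill=X)
root.mainloop()
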
diff --git a/lib/python2.7/idlelib/NEWS.txt b/lib/python2.7/idlelib/NEWS.txt
index 68d4e93..3336e2f 100644
--- a/lib/python2.7/idlelib/NEWS.txt
+++ b/lib/python2.7/idlelib/NEWS.txt
@@ -1,7 +1,139 @@
+What's New in IDLE 2.7.11?
+==========================
+*Release date: 2015-12-06*
+
+- Issue 15348: Stop the debugger engine (normally in a user process)
+  before closing the debugger window (running in the IDLE process).
+  This prevents the RuntimeErrors that were being caught and ignored.
+
+- Issue #24455: Prevent IDLE from hanging when a) closing the shell while the
+  debugger is active (15347); b) closing the debugger with the [X] button
+  (15348); and c) activating the debugger when already active (24455).
+  The patch by Mark Roseman does this by making two changes.
+  1. Suspend and resume the gui.interaction method with the tcl vwait
+  mechanism intended for this purpose (instead of root.mainloop & .quit).
+  2. In gui.run, allow any existing interaction to terminate first.
+
+- Change 'The program' to 'Your program' in an IDLE 'kill program?' message
+  to make it clearer that the program referred to is the currently running
+  user program, not IDLE itself.
+
+- Issue #24750: Improve the appearance of the IDLE editor window status bar.
+  Patch by Mark Roseman.
+
+- Issue #25313: Change the handling of new built-in text color themes to better
+  address the compatibility problem introduced by the addition of IDLE Dark.
+  Consistently use the revised idleConf.CurrentTheme everywhere in idlelib.
+
+- Issue #24782: Extension configuration is now a tab in the IDLE Preferences
+  dialog rather than a separate dialog.   The former tabs are now a sorted
+  list.  Patch by Mark Roseman.
+
+- Issue #22726: Re-activate the config dialog help button with some content
+  about the other buttons and the new IDLE Dark theme.
+
+- Issue #24820: IDLE now has an 'IDLE Dark' built-in text color theme.
+  It is more or less IDLE Classic inverted, with a cobalt blue background.
+  Strings, comments, keywords, ... are still green, red, orange, ... .
+  To use it with IDLEs released before November 2015, hit the
+  'Save as New Custom Theme' button and enter a new name,
+  such as 'Custom Dark'.  The custom theme will work with any IDLE
+  release, and can be modified.
+
+- Issue #25224: README.txt is now an idlelib index for IDLE developers and
+  curious users.  The previous user content is now in the IDLE doc chapter.
+  'IDLE' now means 'Integrated Development and Learning Environment'.
+
+- Issue #24820: Users can now set breakpoint colors in
+  Settings -> Custom Highlighting.  Original patch by Mark Roseman.
+
+- Issue #24972: Inactive selection background now matches active selection
+  background, as configured by users, on all systems.  Found items are now
+  always highlighted on Windows.  Initial patch by Mark Roseman.
+
+- Issue #24570: Idle: make calltip and completion boxes appear on Macs
+  affected by a tk regression.  Initial patch by Mark Roseman.
+
+- Issue #24988: Idle ScrolledList context menus (used in debugger)
+  now work on Mac Aqua.  Patch by Mark Roseman.
+
+- Issue #24801: Make right-click for context menu work on Mac Aqua.
+  Patch by Mark Roseman.
+
+- Issue #25173: Associate tkinter messageboxes with a specific widget.
+  For Mac OSX, make them a 'sheet'.  Patch by Mark Roseman.
+
+- Issue #25198: Enhance the initial html viewer now used for Idle Help.
+  * Properly indent fixed-pitch text (patch by Mark Roseman).
+  * Give code snippet a very Sphinx-like light blueish-gray background.
+  * Re-use initial width and height set by users for shell and editor.
+  * When the Table of Contents (TOC) menu is used, put the section header
+  at the top of the screen.
+
+- Issue #25225: Condense and rewrite Idle doc section on text colors.
+
+- Issue #21995: Explain some differences between IDLE and console Python.
+
+- Issue #22820: Explain need for *print* when running file from Idle editor.
+
+- Issue #25224: Doc: augment Idle feature list and no-subprocess section.
+
+- Issue #25219: Update doc for Idle command line options.
+  Some were missing and notes were not correct.
+
+- Issue #24861: Most of idlelib is private and subject to change.
+  Use idlelib.idle.* to start Idle. See idlelib.__init__.__doc__.
+
+- Issue #25199: Idle: add synchronization comments for future maintainers.
+
+- Issue #16893: Replace help.txt with help.html for Idle doc display.
+  The new idlelib/help.html is rstripped Doc/build/html/library/idle.html.
+  It looks better than help.txt and will better document Idle as released.
+  The tkinter html viewer that works for this file was written by Mark Roseman.
+  The now unused EditorWindow.HelpDialog class and help.txt file are deprecated.
+
+- Issue #24199: Deprecate unused idlelib.idlever with possible removal in 3.6.
+
+- Issue #24790: Remove extraneous code (which also create 2 & 3 conflicts).
+
+- Issue #23672: Allow Idle to edit and run files with astral chars in name.
+  Patch by Mohd Sanad Zaki Rizvi.
+
+- Issue 24745: Idle editor default font. Switch from Courier to
+  platform-sensitive TkFixedFont.  This should not affect current customized
+  font selections.  If there is a problem, edit $HOME/.idlerc/config-main.cfg
+  and remove 'fontxxx' entries from [Editor Window].  Patch by Mark Roseman.
+
+- Issue #21192: Idle editor. When a file is run, put its name in the restart bar.
+  Do not print false prompts. Original patch by Adnan Umer.
+
+- Issue #13884: Idle menus. Remove tearoff lines. Patch by Roger Serwy.
+
+- Issue #15809: IDLE shell now uses locale encoding instead of Latin1 for
+  decoding unicode literals.
+
+
+What's New in IDLE 2.7.10?
+=========================
+*Release date: 2015-05-23*
+
+- Issue #23583: Fixed writing unicode to standard output stream in IDLE.
+
+- Issue #20577: Configuration of the max line length for the FormatParagraph
+  extension has been moved from the General tab of the Idle preferences dialog
+  to the FormatParagraph tab of the Config Extensions dialog.
+  Patch by Tal Einat.
+
+- Issue #16893: Update Idle doc chapter to match current Idle and add new
+  information.
+
+- Issue #23180: Rename IDLE "Windows" menu item to "Window".
+  Patch by Al Sweigart.
+
+
 What's New in IDLE 2.7.9?
 =========================
-
-*Release data: 2014-12-07* (projected)
+*Release date: 2014-12-10*
 
 - Issue #16893: Update Idle doc chapter to match current Idle and add new
   information.
@@ -30,12 +162,11 @@
   move version to end.
 
 - Issue #14105: Idle debugger breakpoints no longer disappear
-  when inseting or deleting lines.
+  when inserting or deleting lines.
 
 
 What's New in IDLE 2.7.8?
 =========================
-
 *Release date: 2014-06-29*
 
 - Issue #21940: Add unittest for WidgetRedirector. Initial patch by Saimadhav
@@ -63,7 +194,6 @@
 
 What's New in IDLE 2.7.7?
 =========================
-
 *Release date: 2014-05-31*
 
 - Issue #18104: Add idlelib/idle_test/htest.py with a few sample tests to begin
@@ -101,7 +231,6 @@
 
 What's New in IDLE 2.7.6?
 =========================
-
 *Release date: 2013-11-10*
 
 - Issue #19426: Fixed the opening of Python source file with specified encoding.
@@ -149,7 +278,6 @@
 
 What's New in IDLE 2.7.5?
 =========================
-
 *Release date: 2013-05-12*
 
 - Issue #17838: Allow sys.stdin to be reassigned.
@@ -184,7 +312,6 @@
 
 What's New in IDLE 2.7.4?
 =========================
-
 *Release date: 2013-04-06*
 
 - Issue #17625: In IDLE, close the replace dialog after it is used.
@@ -255,7 +382,6 @@
 
 What's New in IDLE 2.7.3?
 =========================
-
 *Release date: 2012-04-09*
 
 - Issue #964437 Make IDLE help window non-modal.
@@ -288,7 +414,6 @@
 
 What's New in IDLE 2.7.2?
 =========================
-
 *Release date: 2011-06-11*
 
 - Issue #11718: IDLE's open module dialog couldn't find the __init__.py
@@ -333,7 +458,6 @@
 
 What's New in Python 2.7.1?
 ===========================
-
 *Release date: 2010-11-27*
 
 - Issue #6378: idle.bat now runs with the appropriate Python version rather than
@@ -342,7 +466,6 @@
 
 What's New in IDLE 2.7?
 =======================
-
 *Release date: 2010-07-03*
 
 - Issue #5150: IDLE's format menu now has an option to strip trailing
@@ -374,7 +497,6 @@
 
 What's New in IDLE 2.6?
 =======================
-
 *Release date: 01-Oct-2008*
 
 - Issue #2665: On Windows, an IDLE installation upgraded from an old version
@@ -388,11 +510,6 @@
 - Autocompletion of filenames now support alternate separators, e.g. the
   '/' char on Windows.  Patch 2061 Tal Einat.
 
-What's New in IDLE 2.6a1?
-=========================
-
-*Release date: 29-Feb-2008*
-
 - Configured selection highlighting colors were ignored; updating highlighting
   in the config dialog would cause non-Python files to be colored as if they
   were Python source; improve use of ColorDelagator.  Patch 1334. Tal Einat.
@@ -464,15 +581,8 @@
 
 What's New in IDLE 1.2?
 =======================
-
 *Release date: 19-SEP-2006*
 
-
-What's New in IDLE 1.2c1?
-=========================
-
-*Release date: 17-AUG-2006*
-
 - File menu hotkeys: there were three 'p' assignments.  Reassign the
   'Save Copy As' and 'Print' hotkeys to 'y' and 't'.  Change the
   Shell hotkey from 's' to 'l'.
@@ -493,11 +603,6 @@
 - When used w/o subprocess, all exceptions were preceded by an error
   message claiming they were IDLE internal errors (since 1.2a1).
 
-What's New in IDLE 1.2b3?
-=========================
-
-*Release date: 03-AUG-2006*
-
 - Bug #1525817: Don't truncate short lines in IDLE's tool tips.
 
 - Bug #1517990: IDLE keybindings on MacOS X now work correctly
@@ -521,26 +626,6 @@
   'as' keyword in comment directly following import command. Closes 1325071.
   Patch 1479219 Tal Einat
 
-What's New in IDLE 1.2b2?
-=========================
-
-*Release date: 11-JUL-2006*
-
-What's New in IDLE 1.2b1?
-=========================
-
-*Release date: 20-JUN-2006*
-
-What's New in IDLE 1.2a2?
-=========================
-
-*Release date: 27-APR-2006*
-
-What's New in IDLE 1.2a1?
-=========================
-
-*Release date: 05-APR-2006*
-
 - Patch #1162825: Support non-ASCII characters in IDLE window titles.
 
 - Source file f.flush() after writing; trying to avoid lossage if user
@@ -620,19 +705,14 @@
 - The remote procedure call module rpc.py can now access data attributes of
   remote registered objects.  Changes to these attributes are local, however.
 
+
 What's New in IDLE 1.1?
 =======================
-
 *Release date: 30-NOV-2004*
 
 - On OpenBSD, terminating IDLE with ctrl-c from the command line caused a
   stuck subprocess MainThread because only the SocketThread was exiting.
 
-What's New in IDLE 1.1b3/rc1?
-=============================
-
-*Release date: 18-NOV-2004*
-
 - Saving a Keyset w/o making changes (by using the "Save as New Custom Key Set"
   button) caused IDLE to fail on restart (no new keyset was created in
   config-keys.cfg).  Also true for Theme/highlights.  Python Bug 1064535.
@@ -640,28 +720,12 @@
 - A change to the linecache.py API caused IDLE to exit when an exception was
   raised while running without the subprocess (-n switch).  Python Bug 1063840.
 
-What's New in IDLE 1.1b2?
-=========================
-
-*Release date: 03-NOV-2004*
-
 - When paragraph reformat width was made configurable, a bug was
   introduced that caused reformatting of comment blocks to ignore how
   far the block was indented, effectively adding the indentation width
   to the reformat width.  This has been repaired, and the reformat
   width is again a bound on the total width of reformatted lines.
 
-What's New in IDLE 1.1b1?
-=========================
-
-*Release date: 15-OCT-2004*
-
-
-What's New in IDLE 1.1a3?
-=========================
-
-*Release date: 02-SEP-2004*
-
 - Improve keyboard focus binding, especially in Windows menu.  Improve
   window raising, especially in the Windows menu and in the debugger.
   IDLEfork 763524.
@@ -669,24 +733,12 @@
 - If user passes a non-existent filename on the commandline, just
   open a new file, don't raise a dialog.  IDLEfork 854928.
 
-
-What's New in IDLE 1.1a2?
-=========================
-
-*Release date: 05-AUG-2004*
-
 - EditorWindow.py was not finding the .chm help file on Windows.  Typo
   at Rev 1.54.  Python Bug 990954
 
 - checking sys.platform for substring 'win' was breaking IDLE docs on Mac
   (darwin).  Also, Mac Safari browser requires full file:// URIs.  SF 900580.
 
-
-What's New in IDLE 1.1a1?
-=========================
-
-*Release date: 08-JUL-2004*
-
 - Redirect the warning stream to the shell during the ScriptBinding check of
   user code and format the warning similarly to an exception for both that
   check and for runtime warnings raised in the subprocess.
@@ -749,26 +801,10 @@
 
 What's New in IDLE 1.0?
 =======================
-
 *Release date: 29-Jul-2003*
 
-- Added a banner to the shell discussing warnings possibly raised by personal
-  firewall software.  Added same comment to README.txt.
-
-
-What's New in IDLE 1.0 release candidate 2?
-===========================================
-
-*Release date: 24-Jul-2003*
-
 - Calltip error when docstring was None  Python Bug 775541
 
-
-What's New in IDLE 1.0 release candidate 1?
-===========================================
-
-*Release date: 18-Jul-2003*
-
 - Updated extend.txt, help.txt, and config-extensions.def to correctly
   reflect the current status of the configuration system.  Python Bug 768469
 
@@ -784,12 +820,6 @@
   sys.std{in|out|err}.encoding, for both the local and the subprocess case.
   SF IDLEfork patch 682347.
 
-
-What's New in IDLE 1.0b2?
-=========================
-
-*Release date: 29-Jun-2003*
-
 - Extend AboutDialog.ViewFile() to support file encodings.  Make the CREDITS
   file Latin-1.
 
@@ -828,7 +858,6 @@
 
 What's New in IDLEfork 0.9b1?
 =============================
-
 *Release date: 02-Jun-2003*
 
 - The current working directory of the execution environment (and shell
@@ -930,10 +959,8 @@
   exception formatting to the subprocess.
 
 
-
 What's New in IDLEfork 0.9 Alpha 2?
 ===================================
-
 *Release date: 27-Jan-2003*
 
 - Updated INSTALL.txt to claify use of the python2 rpm.
@@ -1037,7 +1064,6 @@
 
 What's New in IDLEfork 0.9 Alpha 1?
 ===================================
-
 *Release date: 31-Dec-2002*
 
 - First release of major new functionality.  For further details refer to
diff --git a/lib/python2.7/idlelib/OutputWindow.py b/lib/python2.7/idlelib/OutputWindow.py
index e18d846..63dc737 100644
--- a/lib/python2.7/idlelib/OutputWindow.py
+++ b/lib/python2.7/idlelib/OutputWindow.py
@@ -96,7 +96,7 @@
                     "No special line",
                     "The line you point at doesn't look like "
                     "a valid file name followed by a line number.",
-                    master=self.text)
+                    parent=self.text)
                 return
         filename, lineno = result
         edit = self.flist.open(filename)
diff --git a/lib/python2.7/idlelib/PathBrowser.py b/lib/python2.7/idlelib/PathBrowser.py
index ef7f8ff..ae26714 100644
--- a/lib/python2.7/idlelib/PathBrowser.py
+++ b/lib/python2.7/idlelib/PathBrowser.py
@@ -17,6 +17,7 @@
         self.init(flist)
 
     def settitle(self):
+        "Set window titles."
         self.top.wm_title("Path Browser")
         self.top.wm_iconname("Path Browser")
 
@@ -70,7 +71,7 @@
 
     def ispackagedir(self, file):
         if not os.path.isdir(file):
-            return 0
+            return False
         init = os.path.join(file, "__init__.py")
         return os.path.exists(init)
 
@@ -91,7 +92,7 @@
         sorted.sort()
         return sorted
 
-def _path_browser(parent):
+def _path_browser(parent):  # htest #
     flist = PyShellFileList(parent)
     PathBrowser(flist, _htest=True)
     parent.mainloop()
diff --git a/lib/python2.7/idlelib/PyShell.py b/lib/python2.7/idlelib/PyShell.py
index 79db883..ea0d7de 100755
--- a/lib/python2.7/idlelib/PyShell.py
+++ b/lib/python2.7/idlelib/PyShell.py
@@ -10,8 +10,6 @@
 import socket
 import time
 import threading
-import traceback
-import types
 import io
 
 import linecache
@@ -32,11 +30,11 @@
 from idlelib.UndoDelegator import UndoDelegator
 from idlelib.OutputWindow import OutputWindow
 from idlelib.configHandler import idleConf
-from idlelib import idlever
 from idlelib import rpc
 from idlelib import Debugger
 from idlelib import RemoteDebugger
 from idlelib import macosxSupport
+from idlelib import IOBinding
 
 IDENTCHARS = string.ascii_letters + string.digits + "_"
 HOST = '127.0.0.1' # python execution server on localhost loopback
@@ -160,7 +158,7 @@
             # possible due to update in restore_file_breaks
             return
         if color:
-            theme = idleConf.GetOption('main','Theme','name')
+            theme = idleConf.CurrentTheme()
             cfg = idleConf.GetHighlight(theme, "break")
         else:
             cfg = {'foreground': '', 'background': ''}
@@ -171,7 +169,7 @@
         filename = self.io.filename
         text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
         try:
-            i = self.breakpoints.index(lineno)
+            self.breakpoints.index(lineno)
         except ValueError:  # only add if missing, i.e. do once
             self.breakpoints.append(lineno)
         try:    # update the subprocess debugger
@@ -345,7 +343,7 @@
 
     def LoadTagDefs(self):
         ColorDelegator.LoadTagDefs(self)
-        theme = idleConf.GetOption('main','Theme','name')
+        theme = idleConf.CurrentTheme()
         self.tagdefs.update({
             "stdin": {'background':None,'foreground':None},
             "stdout": idleConf.GetHighlight(theme, "stdout"),
@@ -439,7 +437,7 @@
             try:
                 self.rpcclt = MyRPCClient(addr)
                 break
-            except socket.error as err:
+            except socket.error:
                 pass
         else:
             self.display_port_binding_error()
@@ -460,7 +458,7 @@
         self.rpcclt.listening_sock.settimeout(10)
         try:
             self.rpcclt.accept()
-        except socket.timeout as err:
+        except socket.timeout:
             self.display_no_subprocess_error()
             return None
         self.rpcclt.register("console", self.tkconsole)
@@ -474,7 +472,7 @@
         self.poll_subprocess()
         return self.rpcclt
 
-    def restart_subprocess(self, with_cwd=False):
+    def restart_subprocess(self, with_cwd=False, filename=''):
         if self.restarting:
             return self.rpcclt
         self.restarting = True
@@ -495,25 +493,24 @@
         self.spawn_subprocess()
         try:
             self.rpcclt.accept()
-        except socket.timeout as err:
+        except socket.timeout:
             self.display_no_subprocess_error()
             return None
         self.transfer_path(with_cwd=with_cwd)
         console.stop_readline()
         # annotate restart in shell window and mark it
         console.text.delete("iomark", "end-1c")
-        if was_executing:
-            console.write('\n')
-            console.showprompt()
-        halfbar = ((int(console.width) - 16) // 2) * '='
-        console.write(halfbar + ' RESTART ' + halfbar)
+        tag = 'RESTART: ' + (filename if filename else 'Shell')
+        halfbar = ((int(console.width) -len(tag) - 4) // 2) * '='
+        console.write("\n{0} {1} {0}".format(halfbar, tag))
         console.text.mark_set("restart", "end-1c")
         console.text.mark_gravity("restart", "left")
-        console.showprompt()
+        if not filename:
+            console.showprompt()
         # restart subprocess debugger
         if debug:
             # Restarted debugger connects to current instance of debug GUI
-            gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
+            RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
             # reload remote debugger breakpoints for all PyShellEditWindows
             debug.load_breakpoints()
         self.compile.compiler.flags = self.original_compiler_flags
@@ -634,7 +631,7 @@
         item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
         from idlelib.TreeWidget import ScrolledCanvas, TreeNode
         top = Toplevel(self.tkconsole.root)
-        theme = idleConf.GetOption('main','Theme','name')
+        theme = idleConf.CurrentTheme()
         background = idleConf.GetHighlight(theme, 'normal')['background']
         sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
         sc.frame.pack(expand=1, fill="both")
@@ -654,7 +651,7 @@
         if source is None:
             source = open(filename, "r").read()
         try:
-            code = compile(source, filename, "exec")
+            code = compile(source, filename, "exec", dont_inherit=True)
         except (OverflowError, SyntaxError):
             self.tkconsole.resetoutput()
             print('*** Error in script or command!\n'
@@ -671,10 +668,11 @@
         self.more = 0
         self.save_warnings_filters = warnings.filters[:]
         warnings.filterwarnings(action="error", category=SyntaxWarning)
-        if isinstance(source, types.UnicodeType):
-            from idlelib import IOBinding
+        if isinstance(source, unicode) and IOBinding.encoding != 'utf-8':
             try:
-                source = source.encode(IOBinding.encoding)
+                source = '# -*- coding: %s -*-\n%s' % (
+                        IOBinding.encoding,
+                        source.encode(IOBinding.encoding))
             except UnicodeError:
                 self.tkconsole.resetoutput()
                 self.write("Unsupported characters in input\n")
@@ -801,7 +799,7 @@
                     "Exit?",
                     "Do you want to exit altogether?",
                     default="yes",
-                    master=self.tkconsole.text):
+                    parent=self.tkconsole.text):
                     raise
                 else:
                     self.showtraceback()
@@ -839,7 +837,7 @@
             "Run IDLE with the -n command line switch to start without a "
             "subprocess and refer to Help/IDLE Help 'Running without a "
             "subprocess' for further details.",
-            master=self.tkconsole.text)
+            parent=self.tkconsole.text)
 
     def display_no_subprocess_error(self):
         tkMessageBox.showerror(
@@ -847,14 +845,14 @@
             "IDLE's subprocess didn't make connection.  Either IDLE can't "
             "start a subprocess or personal firewall software is blocking "
             "the connection.",
-            master=self.tkconsole.text)
+            parent=self.tkconsole.text)
 
     def display_executing_dialog(self):
         tkMessageBox.showerror(
             "Already executing",
             "The Python Shell window is already executing a command; "
             "please wait until it is finished.",
-            master=self.tkconsole.text)
+            parent=self.tkconsole.text)
 
 
 class PyShell(OutputWindow):
@@ -950,7 +948,7 @@
         if self.executing:
             tkMessageBox.showerror("Don't debug now",
                 "You can only toggle the debugger when idle",
-                master=self.text)
+                parent=self.text)
             self.set_debugger_indicator()
             return "break"
         else:
@@ -1008,7 +1006,7 @@
         if self.executing:
             response = tkMessageBox.askokcancel(
                 "Kill?",
-                "The program is still running!\n Do you want to kill it?",
+                "Your program is still running!\n Do you want to kill it?",
                 default="ok",
                 parent=self.text)
             if response is False:
@@ -1056,6 +1054,7 @@
             nosub = "==== No Subprocess ===="
         self.write("Python %s on %s\n%s\n%s" %
                    (sys.version, sys.platform, self.COPYRIGHT, nosub))
+        self.text.focus_force()
         self.showprompt()
         import Tkinter
         Tkinter._default_root = None # 03Jan04 KBK What's this?
@@ -1246,7 +1245,7 @@
         while i > 0 and line[i-1] in " \t":
             i = i-1
         line = line[:i]
-        more = self.interp.runsource(line)
+        self.interp.runsource(line)
 
     def open_stack_viewer(self, event=None):
         if self.interp.rpcclt:
@@ -1257,10 +1256,10 @@
             tkMessageBox.showerror("No stack trace",
                 "There is no stack trace yet.\n"
                 "(sys.last_traceback is not defined)",
-                master=self.text)
+                parent=self.text)
             return
         from idlelib.StackViewer import StackBrowser
-        sv = StackBrowser(self.root, self.flist)
+        StackBrowser(self.root, self.flist)
 
     def view_restart_mark(self, event=None):
         self.text.see("iomark")
@@ -1558,6 +1557,14 @@
     flist = PyShellFileList(root)
     macosxSupport.setupApp(root, flist)
 
+    if macosxSupport.isAquaTk():
+        # There are some screwed up <2> class bindings for text
+        # widgets defined in Tk which we need to do away with.
+        # See issue #24801.
+        root.unbind_class('Text', '<B2>')
+        root.unbind_class('Text', '<B2-Motion>')
+        root.unbind_class('Text', '<<PasteSelection>>')
+
     if enable_edit:
         if not (cmd or script):
             for filename in args[:]:
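
Illustrative sketch (not part of the patch): restart_subprocess in the hunk above now centers a 'RESTART: <name>' tag in the banner instead of the fixed '== RESTART ==' text. A hypothetical standalone helper showing the same arithmetic (restart_banner and 'example.py' are made up for illustration):

def restart_banner(width, filename=''):
    # Mirrors the banner formatting added to restart_subprocess above.
    tag = 'RESTART: ' + (filename if filename else 'Shell')
    halfbar = ((int(width) - len(tag) - 4) // 2) * '='
    return "\n{0} {1} {0}".format(halfbar, tag)

print(restart_banner(80))                 # ...== RESTART: Shell ==...
print(restart_banner(80, 'example.py'))   # ...== RESTART: example.py ==...
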
diff --git a/lib/python2.7/idlelib/README.txt b/lib/python2.7/idlelib/README.txt
index 101f7eb..7bf74c0 100644
--- a/lib/python2.7/idlelib/README.txt
+++ b/lib/python2.7/idlelib/README.txt
@@ -1,63 +1,229 @@
-IDLE is Python's Tkinter-based Integrated DeveLopment Environment.
+README.txt: an index to idlelib files and the IDLE menu.
 
-IDLE emphasizes a lightweight, clean design with a simple user interface.
-Although it is suitable for beginners, even advanced users will find that
-IDLE has everything they really need to develop pure Python code.
+IDLE is Python's Integrated Development and Learning
+Environment.  The user documentation is part of the Library Reference and
+is available in IDLE by selecting Help => IDLE Help.  This README documents
+idlelib for IDLE developers and curious users.
 
-IDLE features a multi-window text editor with multiple undo, Python colorizing,
-and many other capabilities, e.g. smart indent, call tips, and autocompletion.
+IDLELIB FILES lists files alphabetically by category,
+with a short description of each.
 
-The editor has comprehensive search functions, including searching through
-multiple files.  Class browsers and path browsers provide fast access to
-code objects from a top level viewpoint without dealing with code folding.
+IDLE MENUS shows the menu tree, annotated with the module
+or module object that implements the corresponding function.
 
-There is a Python Shell window which features colorizing and command recall.
-
-IDLE executes Python code in a separate process, which is restarted for each
-Run (F5) initiated from an editor window.  The environment can also be 
-restarted from the Shell window without restarting IDLE.
-
-This enhancement has often been requested, and is now finally available.  The
-magic "reload/import *" incantations are no longer required when editing and
-testing a module two or three steps down the import chain.
-
-(Personal firewall software may warn about the connection IDLE makes to its
-subprocess using this computer's internal loopback interface.  This connection
-is not visible on any external interface and no data is sent to or received
-from the Internet.)
-
-It is possible to interrupt tightly looping user code, even on Windows.
-
-Applications which cannot support subprocesses and/or sockets can still run
-IDLE in a single process.
-
-IDLE has an integrated debugger with stepping, persistent breakpoints, and call
-stack visibility.
-
-There is a GUI configuration manager which makes it easy to select fonts,
-colors, keybindings, and startup options.  This facility includes a feature
-which allows the user to specify additional help sources, either locally or on
-the web.
-
-IDLE is coded in 100% pure Python, using the Tkinter GUI toolkit (Tk/Tcl)
-and is cross-platform, working on Unix, Mac, and Windows.
-
-IDLE accepts command line arguments.  Try idle -h to see the options.
+This file is descriptive, not prescriptive, and may have errors
+and omissions and lag behind changes in idlelib.
 
 
-If you find bugs or have suggestions, let us know about them by using the
-Python Bug Tracker:
+IDLELIB FILES
+Implementation files not in IDLE MENUS are marked (nim).
+Deprecated files and objects are listed separately at the end.
 
-http://sourceforge.net/projects/python
+Startup
+-------
+__init__.py  # import, does nothing
+__main__.py  # -m, starts IDLE
+idle.bat
+idle.py
+idle.pyw
 
-Patches are always appreciated at the Python Patch Tracker, and change
-requests should be posted to the RFE Tracker.
+Implementation
+--------------
+AutoComplete.py   # Complete attribute names or filenames.
+AutoCompleteWindow.py  # Display completions.
+AutoExpand.py     # Expand word with previous word in file.
+Bindings.py       # Define most of IDLE menu.
+CallTipWindow.py  # Display calltip.
+CallTips.py       # Create calltip text.
+ClassBrowser.py   # Create module browser window.
+CodeContext.py    # Show compound statement headers otherwise not visible.
+ColorDelegator.py # Colorize text (nim).
+Debugger.py       # Debug code run from editor; show window.
+Delegator.py      # Define base class for delegators (nim).
+EditorWindow.py   # Define most of editor and utility functions.
+FileList.py       # Open files and manage list of open windows (nim).
+FormatParagraph.py# Re-wrap multiline strings and comments.
+GrepDialog.py     # Find all occurrences of pattern in multiple files.
+HyperParser.py    # Parse code around a given index.
+IOBinding.py      # Open, read, and write files
+IdleHistory.py    # Get previous or next user input in shell (nim)
+MultiCall.py      # Wrap tk widget to allow multiple calls per event (nim).
+MultiStatusBar.py # Define status bar for windows (nim).
+ObjectBrowser.py  # Define class used in StackViewer (nim).
+OutputWindow.py   # Create window for grep output.
+ParenMatch.py     # Match fenceposts: (), [], and {}.
+PathBrowser.py    # Create path browser window.
+Percolator.py     # Manage delegator stack (nim).
+PyParse.py        # Give information on code indentation
+PyShell.py        # Start IDLE, manage shell, complete editor window
+RemoteDebugger.py # Debug code run in remote process.
+RemoteObjectBrowser.py # Communicate objects between processes with rpc (nim).
+ReplaceDialog.py  # Search and replace pattern in text.
+RstripExtension.py# Strip trailing whitespace
+ScriptBinding.py  # Check and run user code.
+ScrolledList.py   # Define ScrolledList widget for IDLE (nim).
+SearchDialog.py   # Search for pattern in text.
+SearchDialogBase.py  # Define base for search, replace, and grep dialogs.
+SearchEngine.py   # Define engine for all 3 search dialogs.
+StackViewer.py    # View stack after exception.
+TreeWidget.py     # Define tree widget, used in browsers (nim).
+UndoDelegator.py  # Manage undo stack.
+WidgetRedirector.py # Intercept widget subcommands (for percolator) (nim).
+WindowList.py     # Manage window list and define listed top level.
+ZoomHeight.py     # Zoom window to full height of screen.
+aboutDialog.py    # Display About IDLE dialog.
+configDialog.py   # Display user configuration dialogs.
+configHandler.py  # Load, fetch, and save configuration (nim).
+configHelpSourceEdit.py  # Specify help source.
+configSectionNameDialog.py  # Specify user config section name
+dynOptionMenuWidget.py  # Define mutable OptionMenu widget (nim).
+help.py           # Display IDLE's html doc.
+keybindingDialog.py  # Change keybindings.
+macosxSupport.py  # Help IDLE run on Macs (nim).
+rpc.py            # Communicate between idle and user processes (nim).
+run.py            # Manage user code execution subprocess.
+tabbedpages.py    # Define tabbed pages widget (nim).
+textView.py       # Define read-only text widget (nim).
 
-For further details and links, read the Help files and check the IDLE home
-page at
+Configuration
+-------------
+config-extensions.def # Defaults for extensions
+config-highlight.def  # Defaults for colorizing
+config-keys.def       # Defaults for key bindings
+config-main.def       # Defaults for font and general
 
-http://www.python.org/idle/
+Text
+----
+CREDITS.txt  # not maintained, displayed by About IDLE
+HISTORY.txt  # NEWS up to July 2001
+NEWS.txt     # commits, displayed by About IDLE
+README.txt   # this file, displayed by About IDLE
+TODO.txt     # needs review
+extend.txt   # about writing extensions
+help.html    # copy of idle.html in docs, displayed by IDLE Help
 
-There is a mail list for IDLE: idle-dev@python.org.  You can join at
+Subdirectories
+--------------
+Icons  # small image files
+idle_test  # files for human test and automated unit tests
 
-http://mail.python.org/mailman/listinfo/idle-dev
+Unused and Deprecated files and objects (nim)
+---------------------------------------------
+EditorWindow.py: HelpDialog and helpDialog
+ToolTip.py: unused.
+help.txt
+idlever.py
+
+
+IDLE MENUS
+Top level items and most submenu items are defined in Bindings.
+Extensions add submenu items when active.  The names given are
+found, quoted, in one of these modules, paired with a '<<pseudoevent>>'.
+Each pseudoevent is bound to an event handler.  Some event handlers
+call another function that does the actual work.  The annotations below
+are intended to at least give the module where the actual work is done.
+
+File  # IOBinding except as noted
+  New File
+  Open...  # IOBinding.open
+  Open Module
+  Recent Files
+  Class Browser  # ClassBrowser
+  Path Browser  # PathBrowser
+  ---
+  Save  # IOBinding.save
+  Save As...  # IOBinding.save_as
+  Save Copy As...  # IOBinding.save_a_copy
+  ---
+  Print Window  # IOBinding.print_window
+  ---
+  Close
+  Exit
+
+Edit
+  Undo  # UndoDelegator
+  Redo  # UndoDelegator
+  ---
+  Cut
+  Copy
+  Paste
+  Select All
+  ---  # Next 5 items use SearchEngine; dialogs use SearchDialogBase
+  Find  # SearchDialog
+  Find Again
+  Find Selection
+  Find in Files...  # GrepDialog
+  Replace...  # ReplaceDialog
+  Go to Line
+  Show Completions  # AutoComplete extension and AutoCompleteWindow (&HP)
+  Expand Word  # AutoExpand extension
+  Show call tip  # CallTips extension and CallTipWindow (& HyperParser)
+  Show surrounding parens  # ParenMatch (& HyperParser)
+
+Shell  # PyShell
+  View Last Restart  # PyShell.?
+  Restart Shell  # PyShell.?
+
+Debug (Shell only)
+  Go to File/Line
+  Debugger  # Debugger, RemoteDebugger
+  Stack Viewer  # StackViewer
+  Auto-open Stack Viewer  # StackViewer
+
+Format (Editor only)
+  Indent Region
+  Dedent Region
+  Comment Out Region
+  Uncomment Region
+  Tabify Region
+  Untabify Region
+  Toggle Tabs
+  New Indent Width
+  Format Paragraph  # FormatParagraph extension
+  ---
+  Strip trailing whitespace  # RstripExtension extension
+
+Run (Editor only)
+  Python Shell  # PyShell
+  ---
+  Check Module  # ScriptBinding
+  Run Module  # ScriptBinding
+
+Options
+  Configure IDLE  # configDialog
+    (tabs in the dialog)
+    Font tab  # config-main.def
+    Highlight tab  # configSectionNameDialog, config-highlight.def
+    Keys tab  # keybindingDialog, configSectionNameDialog, config-keys.def
+    General tab  # configHelpSourceEdit, config-main.def
+  Configure Extensions  # configDialog
+    Xyz tab  # xyz.py, config-extensions.def
+  ---
+  Code Context (editor only)  # CodeContext extension
+
+Window
+  Zoomheight  # ZoomHeight extension
+  ---
+  <open windows>  # WindowList
+
+Help
+  About IDLE  # aboutDialog
+  ---
+  IDLE Help  # help
+  Python Doc
+  Turtle Demo
+  ---
+  <other help sources>
+
+<Context Menu> (right click)
+Defined in EditorWindow, PyShell, Output
+   Cut
+   Copy
+   Paste
+   ---
+   Go to file/line (shell and output only)
+   Set Breakpoint (editor only)
+   Clear Breakpoint (editor only)
+ Defined in Debugger
+   Go to source line
+   Show stack frame
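
Illustrative sketch (not part of the patch): the IDLE MENUS notes above say each menu item is paired with a '<<pseudoevent>>' that is bound to an event handler, and the htest helper added to IOBinding.py drives the same mechanism via event_generate. The '<<do-something>>' event and Control-d key below are invented for illustration:

from Tkinter import Tk, Text

def on_do_something(event):
    print('<<do-something>> fired')

root = Tk()
text = Text(root)
text.pack()
# A menu entry or keystroke is normally mapped onto a virtual event ...
text.event_add('<<do-something>>', '<Control-d>')
text.bind('<<do-something>>', on_do_something)
# ... and code can also fire the same event directly, as the htests above do:
text.event_generate('<<do-something>>')
root.mainloop()
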
diff --git a/lib/python2.7/idlelib/RemoteDebugger.py b/lib/python2.7/idlelib/RemoteDebugger.py
index 647285f..8c71a21 100644
--- a/lib/python2.7/idlelib/RemoteDebugger.py
+++ b/lib/python2.7/idlelib/RemoteDebugger.py
@@ -21,7 +21,6 @@
 """
 
 import types
-from idlelib import rpc
 from idlelib import Debugger
 
 debugging = 0
@@ -101,7 +100,7 @@
             tb = tracebacktable[tbid]
         stack, i = self.idb.get_stack(frame, tb)
         ##print >>sys.__stderr__, "get_stack() ->", stack
-        stack = [(wrap_frame(frame), k) for frame, k in stack]
+        stack = [(wrap_frame(frame2), k) for frame2, k in stack]
         ##print >>sys.__stderr__, "get_stack() ->", stack
         return stack, i
 
diff --git a/lib/python2.7/idlelib/ScriptBinding.py b/lib/python2.7/idlelib/ScriptBinding.py
index ab2a3f2..0309a8a 100644
--- a/lib/python2.7/idlelib/ScriptBinding.py
+++ b/lib/python2.7/idlelib/ScriptBinding.py
@@ -71,7 +71,7 @@
         try:
             tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
         except tokenize.TokenError as msg:
-            msgtxt, (lineno, start) = msg
+            msgtxt, (lineno, start) = msg.args
             self.editwin.gotoline(lineno)
             self.errorbox("Tabnanny Tokenizing Error",
                           "Token Error: %s" % msgtxt)
@@ -147,7 +147,7 @@
             return 'break'
         interp = self.shell.interp
         if PyShell.use_subprocess:
-            interp.restart_subprocess(with_cwd=False)
+            interp.restart_subprocess(with_cwd=False, filename=code.co_filename)
         dirname = os.path.dirname(filename)
         # XXX Too often this discards arguments the user just set...
         interp.runcommand("""if 1:
@@ -213,10 +213,10 @@
         confirm = tkMessageBox.askokcancel(title="Save Before Run or Check",
                                            message=msg,
                                            default=tkMessageBox.OK,
-                                           master=self.editwin.text)
+                                           parent=self.editwin.text)
         return confirm
 
     def errorbox(self, title, message):
         # XXX This should really be a function of EditorWindow...
-        tkMessageBox.showerror(title, message, master=self.editwin.text)
+        tkMessageBox.showerror(title, message, parent=self.editwin.text)
         self.editwin.text.focus_set()
diff --git a/lib/python2.7/idlelib/ScrolledList.py b/lib/python2.7/idlelib/ScrolledList.py
index e235661..fd9f0ff 100644
--- a/lib/python2.7/idlelib/ScrolledList.py
+++ b/lib/python2.7/idlelib/ScrolledList.py
@@ -1,4 +1,5 @@
 from Tkinter import *
+from idlelib import macosxSupport
 
 class ScrolledList:
 
@@ -22,7 +23,11 @@
         # Bind events to the list box
         listbox.bind("<ButtonRelease-1>", self.click_event)
         listbox.bind("<Double-ButtonRelease-1>", self.double_click_event)
-        listbox.bind("<ButtonPress-3>", self.popup_event)
+        if macosxSupport.isAquaTk():
+            listbox.bind("<ButtonPress-2>", self.popup_event)
+            listbox.bind("<Control-Button-1>", self.popup_event)
+        else:
+            listbox.bind("<ButtonPress-3>", self.popup_event)
         listbox.bind("<Key-Up>", self.up_event)
         listbox.bind("<Key-Down>", self.down_event)
         # Mark as empty
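
Illustrative sketch (not part of the patch): the ScrolledList hunk above binds the context-menu popup to Button-2 and Control-Button-1 on Aqua Tk and to Button-3 elsewhere. A hypothetical helper capturing just that choice (bind_context_menu is made up; IDLE itself obtains the flag from macosxSupport.isAquaTk() after its Mac setup has run):

def bind_context_menu(listbox, popup_event, aqua):
    # aqua: True when running under native OS X Aqua Tk.
    if aqua:
        listbox.bind('<ButtonPress-2>', popup_event)
        listbox.bind('<Control-Button-1>', popup_event)
    else:
        listbox.bind('<ButtonPress-3>', popup_event)
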
diff --git a/lib/python2.7/idlelib/SearchDialog.py b/lib/python2.7/idlelib/SearchDialog.py
index 2aadb84..043168a 100644
--- a/lib/python2.7/idlelib/SearchDialog.py
+++ b/lib/python2.7/idlelib/SearchDialog.py
@@ -23,7 +23,7 @@
 class SearchDialog(SearchDialogBase):
 
     def create_widgets(self):
-        f = SearchDialogBase.create_widgets(self)
+        SearchDialogBase.create_widgets(self)
         self.make_button("Find Next", self.default_command, 1)
 
     def default_command(self, event=None):
diff --git a/lib/python2.7/idlelib/StackViewer.py b/lib/python2.7/idlelib/StackViewer.py
index ccf6283..555a08c 100644
--- a/lib/python2.7/idlelib/StackViewer.py
+++ b/lib/python2.7/idlelib/StackViewer.py
@@ -10,8 +10,7 @@
 
 def StackBrowser(root, flist=None, tb=None, top=None):
     if top is None:
-        from Tkinter import Toplevel
-        top = Toplevel(root)
+        top = tk.Toplevel(root)
     sc = ScrolledCanvas(top, bg="white", highlightthickness=0)
     sc.frame.pack(expand=1, fill="both")
     item = StackTreeItem(flist, tb)
@@ -108,12 +107,9 @@
     def IsExpandable(self):
         return len(self.object) > 0
 
-    def keys(self):
-        return self.object.keys()
-
     def GetSubList(self):
         sublist = []
-        for key in self.keys():
+        for key in self.object.keys():
             try:
                 value = self.object[key]
             except KeyError:
@@ -124,7 +120,10 @@
             sublist.append(item)
         return sublist
 
-def _stack_viewer(parent):
+    def keys(self):  # unused, left for possible 3rd party use
+        return self.object.keys()
+
+def _stack_viewer(parent):  # htest #
     root = tk.Tk()
     root.title("Test StackViewer")
     width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
diff --git a/lib/python2.7/idlelib/TreeWidget.py b/lib/python2.7/idlelib/TreeWidget.py
index 1e56e86..9d9d4d9 100644
--- a/lib/python2.7/idlelib/TreeWidget.py
+++ b/lib/python2.7/idlelib/TreeWidget.py
@@ -246,11 +246,11 @@
         else:
             self.edit_finish()
         try:
-            label = self.label
+            self.label
         except AttributeError:
             # padding carefully selected (on Windows) to match Entry widget:
             self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2)
-        theme = idleConf.GetOption('main','Theme','name')
+        theme = idleConf.CurrentTheme()
         if self.selected:
             self.label.configure(idleConf.GetHighlight(theme, 'hilite'))
         else:
diff --git a/lib/python2.7/idlelib/WidgetRedirector.py b/lib/python2.7/idlelib/WidgetRedirector.py
index 103bfd0..2cb9398 100644
--- a/lib/python2.7/idlelib/WidgetRedirector.py
+++ b/lib/python2.7/idlelib/WidgetRedirector.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 from Tkinter import TclError
 
 class WidgetRedirector:
@@ -161,7 +162,7 @@
     text.focus_set()
     redir = WidgetRedirector(text)
     def my_insert(*args):
-        print "insert", args
+        print("insert", args)
         original_insert(*args)
     original_insert = redir.register("insert", my_insert)
     root.mainloop()
diff --git a/lib/python2.7/idlelib/__init__.py b/lib/python2.7/idlelib/__init__.py
index 7a83dde..32b7eac 100644
--- a/lib/python2.7/idlelib/__init__.py
+++ b/lib/python2.7/idlelib/__init__.py
@@ -1 +1,8 @@
-# Dummy file to make this a package.
+"""The idlelib package implements the Idle application.
+
+Idle includes an interactive shell and editor.
+Use the files named idle.* to start Idle.
+
+The other files are private implementations.  Their details are subject
+to change.  See PEP 434 for more.  Import them at your own risk.
+"""
diff --git a/lib/python2.7/idlelib/aboutDialog.py b/lib/python2.7/idlelib/aboutDialog.py
index 1c3eba1..40ea8ec 100644
--- a/lib/python2.7/idlelib/aboutDialog.py
+++ b/lib/python2.7/idlelib/aboutDialog.py
@@ -1,12 +1,10 @@
 """About Dialog for IDLE
 
 """
-
-from Tkinter import *
 import os
-
+from sys import version
+from Tkinter import *
 from idlelib import textView
-from idlelib import idlever
 
 class AboutDialog(Toplevel):
     """Modal about dialog for idle
@@ -37,6 +35,7 @@
         self.wait_window()
 
     def CreateWidgets(self):
+        release = version[:version.index(' ')]
         frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
         frameButtons = Frame(self)
         frameButtons.pack(side=BOTTOM, fill=X)
@@ -63,14 +62,14 @@
         labelEmail.grid(row=6, column=0, columnspan=2,
                         sticky=W, padx=10, pady=0)
         labelWWW = Label(frameBg, text='https://docs.python.org/' +
-                         sys.version[:3] + '/library/idle.html',
+                         version[:3] + '/library/idle.html',
                          justify=LEFT, fg=self.fg, bg=self.bg)
         labelWWW.grid(row=7, column=0, columnspan=2, sticky=W, padx=10, pady=0)
         Frame(frameBg, borderwidth=1, relief=SUNKEN,
               height=2, bg=self.bg).grid(row=8, column=0, sticky=EW,
                                          columnspan=3, padx=5, pady=5)
-        labelPythonVer = Label(frameBg, text='Python version:  ' + \
-                               sys.version.split()[0], fg=self.fg, bg=self.bg)
+        labelPythonVer = Label(frameBg, text='Python version:  ' +
+                               release, fg=self.fg, bg=self.bg)
         labelPythonVer.grid(row=9, column=0, sticky=W, padx=10, pady=0)
         tkVer = self.tk.call('info', 'patchlevel')
         labelTkVer = Label(frameBg, text='Tk version:  '+
@@ -93,7 +92,7 @@
         Frame(frameBg, borderwidth=1, relief=SUNKEN,
               height=2, bg=self.bg).grid(row=11, column=0, sticky=EW,
                                          columnspan=3, padx=5, pady=5)
-        idle_v = Label(frameBg, text='IDLE version:   ' + idlever.IDLE_VERSION,
+        idle_v = Label(frameBg, text='IDLE version:   ' + release,
                        fg=self.fg, bg=self.bg)
         idle_v.grid(row=12, column=0, sticky=W, padx=10, pady=0)
         idle_button_f = Frame(frameBg, bg=self.bg)
@@ -111,6 +110,7 @@
                                 command=self.ShowIDLECredits)
         idle_credits_b.pack(side=LEFT, padx=10, pady=10)
 
+    # License, et al., are of type site._Printer in Python 2.
     def ShowLicense(self):
         self.display_printer_text('About - License', license)
 
@@ -120,14 +120,16 @@
     def ShowPythonCredits(self):
         self.display_printer_text('About - Python Credits', credits)
 
+    # Decode CREDITS.txt as utf-8 so that Martin v. Loewis's name displays properly.
+    # Read the other files as ascii until they need utf-8, so decoding errors are caught.
     def ShowIDLECredits(self):
-        self.display_file_text('About - Credits', 'CREDITS.txt', 'iso-8859-1')
+        self.display_file_text('About - Credits', 'CREDITS.txt', 'utf-8')
 
     def ShowIDLEAbout(self):
-        self.display_file_text('About - Readme', 'README.txt')
+        self.display_file_text('About - Readme', 'README.txt', 'ascii')
 
     def ShowIDLENEWS(self):
-        self.display_file_text('About - NEWS', 'NEWS.txt')
+        self.display_file_text('About - NEWS', 'NEWS.txt', 'ascii')
 
     def display_printer_text(self, title, printer):
         printer._Printer__setup()
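
aboutDialog now derives its displayed version strings from sys.version instead of the dropped idlever module. A small sketch of the slicing it relies on (output values are examples):

from sys import version

# sys.version starts with the release, e.g. "2.7.10 (default, ...)".
release = version[:version.index(' ')]   # e.g. "2.7.10"
docs_ver = version[:3]                   # major.minor, e.g. "2.7", used in the docs URL
print('%s %s' % (release, docs_ver))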
diff --git a/lib/python2.7/idlelib/config-highlight.def b/lib/python2.7/idlelib/config-highlight.def
index 7d20f78..4146e28 100644
--- a/lib/python2.7/idlelib/config-highlight.def
+++ b/lib/python2.7/idlelib/config-highlight.def
@@ -62,3 +62,32 @@
 stderr-background= #ffffff
 console-foreground= #770000
 console-background= #ffffff
+
+[IDLE Dark]
+comment-foreground = #dd0000
+console-foreground = #ff4d4d
+error-foreground = #FFFFFF
+hilite-background = #7e7e7e
+string-foreground = #02ff02
+stderr-background = #002240
+stderr-foreground = #ffb3b3
+console-background = #002240
+hit-background = #fbfbfb
+string-background = #002240
+normal-background = #002240
+hilite-foreground = #FFFFFF
+keyword-foreground = #ff8000
+error-background = #c86464
+keyword-background = #002240
+builtin-background = #002240
+break-background = #808000
+builtin-foreground = #ff00ff
+definition-foreground = #5e5eff
+stdout-foreground = #c2d1fa
+definition-background = #002240
+normal-foreground = #FFFFFF
+cursor-foreground = #ffffff
+stdout-background = #002240
+hit-foreground = #002240
+comment-background = #002240
+break-foreground = #FFFFFF
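
The new [IDLE Dark] section is an ordinary ConfigParser section, read through the same ConfigParser machinery imported in configHandler.py. A rough sketch of reading it directly; the file path here is an assumption for illustration:

from ConfigParser import ConfigParser

cfg = ConfigParser()
# Hypothetical path; idleConf locates the real .def files next to idlelib.
cfg.read('lib/python2.7/idlelib/config-highlight.def')
if cfg.has_section('IDLE Dark'):
    # Each option maps a highlight element to a color.
    print(cfg.get('IDLE Dark', 'normal-foreground'))   # '#FFFFFF'
    print(cfg.get('IDLE Dark', 'keyword-foreground'))  # '#ff8000'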
diff --git a/lib/python2.7/idlelib/config-main.def b/lib/python2.7/idlelib/config-main.def
index d7ad59f..f241199 100644
--- a/lib/python2.7/idlelib/config-main.def
+++ b/lib/python2.7/idlelib/config-main.def
@@ -53,7 +53,7 @@
 [EditorWindow]
 width= 80
 height= 40
-font= courier
+font= TkFixedFont
 font-size= 10
 font-bold= 0
 encoding= none
@@ -65,6 +65,8 @@
 [Theme]
 default= 1
 name= IDLE Classic
+name2=
+# name2 set in user config-main.cfg for themes added after 2015 Oct 1
 
 [Keys]
 default= 1
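
The EditorWindow font now defaults to the TkFixedFont named font, which configHandler.GetFont (later in this change) resolves to a concrete family and size. A minimal sketch of that resolution, assuming Tk 8.5+ and an available display:

import Tkinter
import tkFont

root = Tkinter.Tk()
root.withdraw()
f = tkFont.Font(name='TkFixedFont', exists=True, root=root)
actual = f.actual()
family = actual['family']
# Negative sizes are in pixels; fall back to 10 points, as GetFont does.
size = actual['size'] if actual['size'] > 0 else 10
weight = 'bold' if actual['weight'] == 'bold' else 'normal'
print('%s %s %s' % (family, size, weight))
root.destroy()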
diff --git a/lib/python2.7/idlelib/configDialog.py b/lib/python2.7/idlelib/configDialog.py
index c416151..585a871 100644
--- a/lib/python2.7/idlelib/configDialog.py
+++ b/lib/python2.7/idlelib/configDialog.py
@@ -14,12 +14,13 @@
 
 from idlelib.configHandler import idleConf
 from idlelib.dynOptionMenuWidget import DynOptionMenu
-from idlelib.tabbedpages import TabbedPageSet
 from idlelib.keybindingDialog import GetKeysDialog
 from idlelib.configSectionNameDialog import GetCfgSectionNameDialog
 from idlelib.configHelpSourceEdit import GetHelpSourceDialog
 from idlelib.tabbedpages import TabbedPageSet
+from idlelib.textView import view_text
 from idlelib import macosxSupport
+
 class ConfigDialog(Toplevel):
 
     def __init__(self, parent, title='', _htest=False, _utest=False):
@@ -42,19 +43,20 @@
         #The first value of the tuple is the sample area tag name.
         #The second value is the display name list sort index.
         self.themeElements={
-            'Normal Text':('normal', '00'),
-            'Python Keywords':('keyword', '01'),
-            'Python Definitions':('definition', '02'),
-            'Python Builtins':('builtin', '03'),
-            'Python Comments':('comment', '04'),
-            'Python Strings':('string', '05'),
-            'Selected Text':('hilite', '06'),
-            'Found Text':('hit', '07'),
-            'Cursor':('cursor', '08'),
-            'Error Text':('error', '09'),
-            'Shell Normal Text':('console', '10'),
-            'Shell Stdout Text':('stdout', '11'),
-            'Shell Stderr Text':('stderr', '12'),
+            'Normal Text': ('normal', '00'),
+            'Python Keywords': ('keyword', '01'),
+            'Python Definitions': ('definition', '02'),
+            'Python Builtins': ('builtin', '03'),
+            'Python Comments': ('comment', '04'),
+            'Python Strings': ('string', '05'),
+            'Selected Text': ('hilite', '06'),
+            'Found Text': ('hit', '07'),
+            'Cursor': ('cursor', '08'),
+            'Editor Breakpoint': ('break', '09'),
+            'Shell Normal Text': ('console', '10'),
+            'Shell Error Text': ('error', '11'),
+            'Shell Stdout Text': ('stdout', '12'),
+            'Shell Stderr Text': ('stderr', '13'),
             }
         self.ResetChangedItems() #load initial values in changed items dict
         self.CreateWidgets()
@@ -76,13 +78,16 @@
 
     def CreateWidgets(self):
         self.tabPages = TabbedPageSet(self,
-                page_names=['Fonts/Tabs', 'Highlighting', 'Keys', 'General'])
+                page_names=['Fonts/Tabs', 'Highlighting', 'Keys', 'General',
+                            'Extensions'])
         self.tabPages.pack(side=TOP, expand=TRUE, fill=BOTH)
         self.CreatePageFontTab()
         self.CreatePageHighlight()
         self.CreatePageKeys()
         self.CreatePageGeneral()
+        self.CreatePageExtensions()
         self.create_action_buttons().pack(side=BOTTOM)
+
     def create_action_buttons(self):
         if macosxSupport.isAquaTk():
             # Changing the default padding on OSX results in unreadable
@@ -92,28 +97,18 @@
             paddingArgs = {'padx':6, 'pady':3}
         outer = Frame(self, pady=2)
         buttons = Frame(outer, pady=2)
-        self.buttonOk = Button(
-                buttons, text='Ok', command=self.Ok,
-                takefocus=FALSE, **paddingArgs)
-        self.buttonApply = Button(
-                buttons, text='Apply', command=self.Apply,
-                takefocus=FALSE, **paddingArgs)
-        self.buttonCancel = Button(
-                buttons, text='Cancel', command=self.Cancel,
-                takefocus=FALSE, **paddingArgs)
-        self.buttonOk.pack(side=LEFT, padx=5)
-        self.buttonApply.pack(side=LEFT, padx=5)
-        self.buttonCancel.pack(side=LEFT, padx=5)
-# Comment out Help button creation and packing until implement self.Help
-##        self.buttonHelp = Button(
-##                buttons, text='Help', command=self.Help,
-##                takefocus=FALSE, **paddingArgs)
-##        self.buttonHelp.pack(side=RIGHT, padx=5)
-
+        for txt, cmd in (
+            ('Ok', self.Ok),
+            ('Apply', self.Apply),
+            ('Cancel', self.Cancel),
+            ('Help', self.Help)):
+            Button(buttons, text=txt, command=cmd, takefocus=FALSE,
+                   **paddingArgs).pack(side=LEFT, padx=5)
         # add space above buttons
         Frame(outer, height=2, borderwidth=0).pack(side=TOP)
         buttons.pack(side=BOTTOM)
         return outer
+
     def CreatePageFontTab(self):
         parent = self.parent
         self.fontSize = StringVar(parent)
@@ -218,7 +213,8 @@
             ("'selected'", 'hilite'), ('\n  var2 = ', 'normal'),
             ("'found'", 'hit'), ('\n  var3 = ', 'normal'),
             ('list', 'builtin'), ('(', 'normal'),
-            ('None', 'builtin'), (')\n\n', 'normal'),
+            ('None', 'builtin'), (')\n', 'normal'),
+            ('  breakpoint("line")', 'break'), ('\n\n', 'normal'),
             (' error ', 'error'), (' ', 'normal'),
             ('cursor |', 'cursor'), ('\n ', 'normal'),
             ('shell', 'console'), (' ', 'normal'),
@@ -265,6 +261,7 @@
         self.buttonDeleteCustomTheme=Button(
                 frameTheme, text='Delete Custom Theme',
                 command=self.DeleteCustomTheme)
+        self.new_custom_theme = Label(frameTheme, bd=2)
 
         ##widget packing
         #body
@@ -288,6 +285,7 @@
         self.optMenuThemeBuiltin.pack(side=TOP, fill=X, padx=5, pady=5)
         self.optMenuThemeCustom.pack(side=TOP, fill=X, anchor=W, padx=5, pady=5)
         self.buttonDeleteCustomTheme.pack(side=TOP, fill=X, padx=5, pady=5)
+        self.new_custom_theme.pack(side=TOP, fill=X, pady=5)
         return frame
 
     def CreatePageKeys(self):
@@ -483,9 +481,9 @@
         return frame
 
     def AttachVarCallbacks(self):
-        self.fontSize.trace_variable('w', self.VarChanged_fontSize)
-        self.fontName.trace_variable('w', self.VarChanged_fontName)
-        self.fontBold.trace_variable('w', self.VarChanged_fontBold)
+        self.fontSize.trace_variable('w', self.VarChanged_font)
+        self.fontName.trace_variable('w', self.VarChanged_font)
+        self.fontBold.trace_variable('w', self.VarChanged_font)
         self.spaceNum.trace_variable('w', self.VarChanged_spaceNum)
         self.colour.trace_variable('w', self.VarChanged_colour)
         self.builtinTheme.trace_variable('w', self.VarChanged_builtinTheme)
@@ -502,15 +500,15 @@
         self.autoSave.trace_variable('w', self.VarChanged_autoSave)
         self.encoding.trace_variable('w', self.VarChanged_encoding)
 
-    def VarChanged_fontSize(self, *params):
-        value = self.fontSize.get()
-        self.AddChangedItem('main', 'EditorWindow', 'font-size', value)
-
-    def VarChanged_fontName(self, *params):
+    def VarChanged_font(self, *params):
+        '''When one font attribute changes, save them all, as they are
+        not independent from each other. In particular, when we are
+        overriding the default font, we need to write out everything.
+        '''
         value = self.fontName.get()
         self.AddChangedItem('main', 'EditorWindow', 'font', value)
-
-    def VarChanged_fontBold(self, *params):
+        value = self.fontSize.get()
+        self.AddChangedItem('main', 'EditorWindow', 'font-size', value)
         value = self.fontBold.get()
         self.AddChangedItem('main', 'EditorWindow', 'font-bold', value)
 
@@ -523,7 +521,16 @@
 
     def VarChanged_builtinTheme(self, *params):
         value = self.builtinTheme.get()
-        self.AddChangedItem('main', 'Theme', 'name', value)
+        if value == 'IDLE Dark':
+            if idleConf.GetOption('main', 'Theme', 'name') != 'IDLE New':
+                self.AddChangedItem('main', 'Theme', 'name', 'IDLE Classic')
+            self.AddChangedItem('main', 'Theme', 'name2', value)
+            self.new_custom_theme.config(text='New theme, see Help',
+                                         fg='#500000')
+        else:
+            self.AddChangedItem('main', 'Theme', 'name', value)
+            self.AddChangedItem('main', 'Theme', 'name2', '')
+            self.new_custom_theme.config(text='', fg='black')
         self.PaintThemeSample()
 
     def VarChanged_customTheme(self, *params):
@@ -976,24 +983,24 @@
         fonts.sort()
         for font in fonts:
             self.listFontName.insert(END, font)
-        configuredFont = idleConf.GetOption(
-                'main', 'EditorWindow', 'font', default='courier')
-        lc_configuredFont = configuredFont.lower()
-        self.fontName.set(lc_configuredFont)
+        configuredFont = idleConf.GetFont(self, 'main', 'EditorWindow')
+        fontName = configuredFont[0].lower()
+        fontSize = configuredFont[1]
+        fontBold  = configuredFont[2]=='bold'
+        self.fontName.set(fontName)
         lc_fonts = [s.lower() for s in fonts]
-        if lc_configuredFont in lc_fonts:
-            currentFontIndex = lc_fonts.index(lc_configuredFont)
+        try:
+            currentFontIndex = lc_fonts.index(fontName)
             self.listFontName.see(currentFontIndex)
             self.listFontName.select_set(currentFontIndex)
             self.listFontName.select_anchor(currentFontIndex)
+        except ValueError:
+            pass
         ##font size dropdown
-        fontSize = idleConf.GetOption(
-                'main', 'EditorWindow', 'font-size', type='int', default='10')
         self.optMenuFontSize.SetMenu(('7', '8', '9', '10', '11', '12', '13',
                                       '14', '16', '18', '20', '22'), fontSize )
         ##fontWeight
-        self.fontBold.set(idleConf.GetOption(
-                'main', 'EditorWindow', 'font-bold', default=0, type='bool'))
+        self.fontBold.set(fontBold)
         ##font sample
         self.SetFontSample()
 
@@ -1101,6 +1108,7 @@
         self.LoadKeyCfg()
         ### general page
         self.LoadGeneralCfg()
+        # note: extension page handled separately
 
     def SaveNewKeySet(self, keySetName, keySet):
         """
@@ -1154,6 +1162,7 @@
             # save these even if unchanged!
             idleConf.userCfg[configType].Save()
         self.ResetChangedItems() #clear the changed items dict
+        self.save_all_changed_extensions()  # uses a different mechanism
 
     def DeactivateCurrentConfig(self):
         #Before a config is saved, some cleanup of current
@@ -1185,7 +1194,200 @@
         self.ActivateConfigChanges()
 
     def Help(self):
-        pass
+        page = self.tabPages._current_page
+        view_text(self, title='Help for IDLE preferences',
+                 text=help_common+help_pages.get(page, ''))
+
+    def CreatePageExtensions(self):
+        """Part of the config dialog used for configuring IDLE extensions.
+
+        This code is generic - it works for any and all IDLE extensions.
+
+        IDLE extensions save their configuration options using idleConf.
+        This code reads the current configuration using idleConf, supplies a
+        GUI interface to change the configuration values, and saves the
+        changes using idleConf.
+
+        Not all changes take effect immediately - some may require restarting IDLE.
+        This depends on each extension's implementation.
+
+        All values are treated as text, and it is up to the user to supply
+        reasonable values. The only exceptions are the 'enable*' options,
+        which are boolean and can be toggled with a True/False button.
+        """
+        parent = self.parent
+        frame = self.tabPages.pages['Extensions'].frame
+        self.ext_defaultCfg = idleConf.defaultCfg['extensions']
+        self.ext_userCfg = idleConf.userCfg['extensions']
+        self.is_int = self.register(is_int)
+        self.load_extensions()
+        # create widgets - a listbox shows all available extensions, with the
+        # controls for the extension selected in the listbox to the right
+        self.extension_names = StringVar(self)
+        frame.rowconfigure(0, weight=1)
+        frame.columnconfigure(2, weight=1)
+        self.extension_list = Listbox(frame, listvariable=self.extension_names,
+                                      selectmode='browse')
+        self.extension_list.bind('<<ListboxSelect>>', self.extension_selected)
+        scroll = Scrollbar(frame, command=self.extension_list.yview)
+        self.extension_list.yscrollcommand=scroll.set
+        self.details_frame = LabelFrame(frame, width=250, height=250)
+        self.extension_list.grid(column=0, row=0, sticky='nws')
+        scroll.grid(column=1, row=0, sticky='ns')
+        self.details_frame.grid(column=2, row=0, sticky='nsew', padx=[10, 0])
+        frame.configure(padx=10, pady=10)
+        self.config_frame = {}
+        self.current_extension = None
+
+        self.outerframe = self                      # TEMPORARY
+        self.tabbed_page_set = self.extension_list  # TEMPORARY
+
+        # create the frame holding controls for each extension
+        ext_names = ''
+        for ext_name in sorted(self.extensions):
+            self.create_extension_frame(ext_name)
+            ext_names = ext_names + '{' + ext_name + '} '
+        self.extension_names.set(ext_names)
+        self.extension_list.selection_set(0)
+        self.extension_selected(None)
+
+    def load_extensions(self):
+        "Fill self.extensions with data from the default and user configs."
+        self.extensions = {}
+        for ext_name in idleConf.GetExtensions(active_only=False):
+            self.extensions[ext_name] = []
+
+        for ext_name in self.extensions:
+            opt_list = sorted(self.ext_defaultCfg.GetOptionList(ext_name))
+
+            # bring 'enable' options to the beginning of the list
+            enables = [opt_name for opt_name in opt_list
+                       if opt_name.startswith('enable')]
+            for opt_name in enables:
+                opt_list.remove(opt_name)
+            opt_list = enables + opt_list
+
+            for opt_name in opt_list:
+                def_str = self.ext_defaultCfg.Get(
+                        ext_name, opt_name, raw=True)
+                try:
+                    def_obj = {'True':True, 'False':False}[def_str]
+                    opt_type = 'bool'
+                except KeyError:
+                    try:
+                        def_obj = int(def_str)
+                        opt_type = 'int'
+                    except ValueError:
+                        def_obj = def_str
+                        opt_type = None
+                try:
+                    value = self.ext_userCfg.Get(
+                            ext_name, opt_name, type=opt_type, raw=True,
+                            default=def_obj)
+                except ValueError:  # Need this until .Get fixed
+                    value = def_obj  # bad values overwritten by entry
+                var = StringVar(self)
+                var.set(str(value))
+
+                self.extensions[ext_name].append({'name': opt_name,
+                                                  'type': opt_type,
+                                                  'default': def_str,
+                                                  'value': value,
+                                                  'var': var,
+                                                 })
+
+    def extension_selected(self, event):
+        newsel = self.extension_list.curselection()
+        if newsel:
+            newsel = self.extension_list.get(newsel)
+        if newsel is None or newsel != self.current_extension:
+            if self.current_extension:
+                self.details_frame.config(text='')
+                self.config_frame[self.current_extension].grid_forget()
+                self.current_extension = None
+        if newsel:
+            self.details_frame.config(text=newsel)
+            self.config_frame[newsel].grid(column=0, row=0, sticky='nsew')
+            self.current_extension = newsel
+
+    def create_extension_frame(self, ext_name):
+        """Create a frame holding the widgets to configure one extension"""
+        f = VerticalScrolledFrame(self.details_frame, height=250, width=250)
+        self.config_frame[ext_name] = f
+        entry_area = f.interior
+        # create an entry for each configuration option
+        for row, opt in enumerate(self.extensions[ext_name]):
+            # create a row with a label and entry/checkbutton
+            label = Label(entry_area, text=opt['name'])
+            label.grid(row=row, column=0, sticky=NW)
+            var = opt['var']
+            if opt['type'] == 'bool':
+                Checkbutton(entry_area, textvariable=var, variable=var,
+                            onvalue='True', offvalue='False',
+                            indicatoron=FALSE, selectcolor='', width=8
+                            ).grid(row=row, column=1, sticky=W, padx=7)
+            elif opt['type'] == 'int':
+                Entry(entry_area, textvariable=var, validate='key',
+                      validatecommand=(self.is_int, '%P')
+                      ).grid(row=row, column=1, sticky=NSEW, padx=7)
+
+            else:
+                Entry(entry_area, textvariable=var
+                      ).grid(row=row, column=1, sticky=NSEW, padx=7)
+        return
+
+    def set_extension_value(self, section, opt):
+        name = opt['name']
+        default = opt['default']
+        value = opt['var'].get().strip() or default
+        opt['var'].set(value)
+        # if self.defaultCfg.has_section(section):
+        # Currently, always true; if not, indent to return
+        if (value == default):
+            return self.ext_userCfg.RemoveOption(section, name)
+        # set the option
+        return self.ext_userCfg.SetOption(section, name, value)
+
+    def save_all_changed_extensions(self):
+        """Save configuration changes to the user config file."""
+        has_changes = False
+        for ext_name in self.extensions:
+            options = self.extensions[ext_name]
+            for opt in options:
+                if self.set_extension_value(ext_name, opt):
+                    has_changes = True
+        if has_changes:
+            self.ext_userCfg.Save()
+
+
+help_common = '''\
+When you click either the Apply or Ok buttons, settings in this
+dialog that are different from IDLE's default are saved in
+a .idlerc directory in your home directory. Except as noted,
+these changes apply to all versions of IDLE installed on this
+machine. Some do not take effect until IDLE is restarted.
+[Cancel] only cancels changes made since the last save.
+'''
+help_pages = {
+    'Highlighting':'''
+Highlighting:
+The IDLE Dark color theme is new in October 2015.  It can only
+be used with older IDLE releases if it is saved as a custom
+theme, with a different name.
+'''
+}
+
+
+def is_int(s):
+    "Return 's is blank or represents an int'"
+    if not s:
+        return True
+    try:
+        int(s)
+        return True
+    except ValueError:
+        return False
+
 
 class VerticalScrolledFrame(Frame):
     """A pure Tkinter vertically scrollable frame.
@@ -1201,7 +1403,7 @@
         vscrollbar = Scrollbar(self, orient=VERTICAL)
         vscrollbar.pack(fill=Y, side=RIGHT, expand=FALSE)
         canvas = Canvas(self, bd=0, highlightthickness=0,
-                        yscrollcommand=vscrollbar.set)
+                        yscrollcommand=vscrollbar.set, width=240)
         canvas.pack(side=LEFT, fill=BOTH, expand=TRUE)
         vscrollbar.config(command=canvas.yview)
 
@@ -1219,9 +1421,6 @@
             # update the scrollbars to match the size of the inner frame
             size = (interior.winfo_reqwidth(), interior.winfo_reqheight())
             canvas.config(scrollregion="0 0 %s %s" % size)
-            if interior.winfo_reqwidth() != canvas.winfo_width():
-                # update the canvas's width to fit the inner frame
-                canvas.config(width=interior.winfo_reqwidth())
         interior.bind('<Configure>', _configure_interior)
 
         def _configure_canvas(event):
@@ -1232,207 +1431,10 @@
 
         return
 
-def is_int(s):
-    "Return 's is blank or represents an int'"
-    if not s:
-        return True
-    try:
-        int(s)
-        return True
-    except ValueError:
-        return False
-
-# TODO:
-# * Revert to default(s)? Per option or per extension?
-# * List options in their original order (possible??)
-class ConfigExtensionsDialog(Toplevel):
-    """A dialog for configuring IDLE extensions.
-
-    This dialog is generic - it works for any and all IDLE extensions.
-
-    IDLE extensions save their configuration options using idleConf.
-    ConfigExtensionsDialog reads the current configuration using idleConf,
-    supplies a GUI interface to change the configuration values, and saves the
-    changes using idleConf.
-
-    Not all changes take effect immediately - some may require restarting IDLE.
-    This depends on each extension's implementation.
-
-    All values are treated as text, and it is up to the user to supply
-    reasonable values. The only exception to this are the 'enable*' options,
-    which are boolean, and can be toggled with an True/False button.
-    """
-    def __init__(self, parent, title=None, _htest=False):
-        Toplevel.__init__(self, parent)
-        self.wm_withdraw()
-
-        self.configure(borderwidth=5)
-        self.geometry(
-                "+%d+%d" % (parent.winfo_rootx() + 20,
-                parent.winfo_rooty() + (30 if not _htest else 150)))
-        self.wm_title(title or 'IDLE Extensions Configuration')
-
-        self.defaultCfg = idleConf.defaultCfg['extensions']
-        self.userCfg = idleConf.userCfg['extensions']
-        self.is_int = self.register(is_int)
-        self.load_extensions()
-        self.create_widgets()
-
-        self.resizable(height=FALSE, width=FALSE) # don't allow resizing yet
-        self.transient(parent)
-        self.protocol("WM_DELETE_WINDOW", self.Cancel)
-        self.tabbed_page_set.focus_set()
-        # wait for window to be generated
-        self.update()
-        # set current width as the minimum width
-        self.wm_minsize(self.winfo_width(), 1)
-        # now allow resizing
-        self.resizable(height=TRUE, width=TRUE)
-
-        self.wm_deiconify()
-        if not _htest:
-            self.grab_set()
-            self.wait_window()
-
-    def load_extensions(self):
-        "Fill self.extensions with data from the default and user configs."
-        self.extensions = {}
-        for ext_name in idleConf.GetExtensions(active_only=False):
-            self.extensions[ext_name] = []
-
-        for ext_name in self.extensions:
-            opt_list = sorted(self.defaultCfg.GetOptionList(ext_name))
-
-            # bring 'enable' options to the beginning of the list
-            enables = [opt_name for opt_name in opt_list
-                       if opt_name.startswith('enable')]
-            for opt_name in enables:
-                opt_list.remove(opt_name)
-            opt_list = enables + opt_list
-
-            for opt_name in opt_list:
-                def_str = self.defaultCfg.Get(
-                        ext_name, opt_name, raw=True)
-                try:
-                    def_obj = {'True':True, 'False':False}[def_str]
-                    opt_type = 'bool'
-                except KeyError:
-                    try:
-                        def_obj = int(def_str)
-                        opt_type = 'int'
-                    except ValueError:
-                        def_obj = def_str
-                        opt_type = None
-                try:
-                    value = self.userCfg.Get(
-                            ext_name, opt_name, type=opt_type, raw=True,
-                            default=def_obj)
-                except ValueError:  # Need this until .Get fixed
-                    value = def_obj  # bad values overwritten by entry
-                var = StringVar(self)
-                var.set(str(value))
-
-                self.extensions[ext_name].append({'name': opt_name,
-                                                  'type': opt_type,
-                                                  'default': def_str,
-                                                  'value': value,
-                                                  'var': var,
-                                                 })
-
-    def create_widgets(self):
-        """Create the dialog's widgets."""
-        self.rowconfigure(0, weight=1)
-        self.rowconfigure(1, weight=0)
-        self.columnconfigure(0, weight=1)
-
-        # create the tabbed pages
-        self.tabbed_page_set = TabbedPageSet(
-                self, page_names=self.extensions.keys(),
-                n_rows=None, max_tabs_per_row=5,
-                page_class=TabbedPageSet.PageRemove)
-        self.tabbed_page_set.grid(row=0, column=0, sticky=NSEW)
-        for ext_name in self.extensions:
-            self.create_tab_page(ext_name)
-
-        self.create_action_buttons().grid(row=1)
-
-    create_action_buttons = ConfigDialog.create_action_buttons.im_func
-
-    def create_tab_page(self, ext_name):
-        """Create the page for an extension."""
-
-        page = LabelFrame(self.tabbed_page_set.pages[ext_name].frame,
-                          border=2, padx=2, relief=GROOVE,
-                          text=' %s ' % ext_name)
-        page.pack(fill=BOTH, expand=True, padx=12, pady=2)
-
-        # create the scrollable frame which will contain the entries
-        scrolled_frame = VerticalScrolledFrame(page, pady=2, height=250)
-        scrolled_frame.pack(side=BOTTOM, fill=BOTH, expand=TRUE)
-        entry_area = scrolled_frame.interior
-        entry_area.columnconfigure(0, weight=0)
-        entry_area.columnconfigure(1, weight=1)
-
-        # create an entry for each configuration option
-        for row, opt in enumerate(self.extensions[ext_name]):
-            # create a row with a label and entry/checkbutton
-            label = Label(entry_area, text=opt['name'])
-            label.grid(row=row, column=0, sticky=NW)
-            var = opt['var']
-            if opt['type'] == 'bool':
-                Checkbutton(entry_area, textvariable=var, variable=var,
-                            onvalue='True', offvalue='False',
-                            indicatoron=FALSE, selectcolor='', width=8
-                    ).grid(row=row, column=1, sticky=W, padx=7)
-            elif opt['type'] == 'int':
-                Entry(entry_area, textvariable=var, validate='key',
-                    validatecommand=(self.is_int, '%P')
-                    ).grid(row=row, column=1, sticky=NSEW, padx=7)
-
-            else:
-                Entry(entry_area, textvariable=var
-                    ).grid(row=row, column=1, sticky=NSEW, padx=7)
-        return
-
-
-    Ok = ConfigDialog.Ok.im_func
-
-    def Apply(self):
-        self.save_all_changed_configs()
-        pass
-
-    Cancel = ConfigDialog.Cancel.im_func
-
-    def Help(self):
-        pass
-
-    def set_user_value(self, section, opt):
-        name = opt['name']
-        default = opt['default']
-        value = opt['var'].get().strip() or default
-        opt['var'].set(value)
-        # if self.defaultCfg.has_section(section):
-        # Currently, always true; if not, indent to return
-        if (value == default):
-            return self.userCfg.RemoveOption(section, name)
-        # set the option
-        return self.userCfg.SetOption(section, name, value)
-
-    def save_all_changed_configs(self):
-        """Save configuration changes to the user config file."""
-        has_changes = False
-        for ext_name in self.extensions:
-            options = self.extensions[ext_name]
-            for opt in options:
-                if self.set_user_value(ext_name, opt):
-                    has_changes = True
-        if has_changes:
-            self.userCfg.Save()
-
 
 if __name__ == '__main__':
     import unittest
     unittest.main('idlelib.idle_test.test_configdialog',
                   verbosity=2, exit=False)
     from idlelib.idle_test.htest import run
-    run(ConfigDialog, ConfigExtensionsDialog)
+    run(ConfigDialog)
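
load_extensions above infers each option's type from its default string before falling back to plain text. A standalone sketch of that coercion; the helper name is made up for illustration:

def coerce_default(def_str):
    """Return (value, opt_type); opt_type is 'bool', 'int' or None (text)."""
    try:
        return {'True': True, 'False': False}[def_str], 'bool'
    except KeyError:
        try:
            return int(def_str), 'int'
        except ValueError:
            return def_str, None

assert coerce_default('True') == (True, 'bool')
assert coerce_default('4') == (4, 'int')
assert coerce_default('z-in') == ('z-in', None)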
diff --git a/lib/python2.7/idlelib/configHandler.py b/lib/python2.7/idlelib/configHandler.py
index 646660a..efd5f3c 100644
--- a/lib/python2.7/idlelib/configHandler.py
+++ b/lib/python2.7/idlelib/configHandler.py
@@ -23,6 +23,8 @@
 import sys
 
 from ConfigParser import ConfigParser
+from Tkinter import TkVersion
+from tkFont import Font, nametofont
 
 class InvalidConfigType(Exception): pass
 class InvalidConfigSet(Exception): pass
@@ -371,8 +373,32 @@
         return theme
 
     def CurrentTheme(self):
-        "Return the name of the currently active theme."
-        return self.GetOption('main', 'Theme', 'name', default='')
+        """Return the name of the currently active text color theme.
+
+        idlelib.config-main.def includes this section
+        [Theme]
+        default= 1
+        name= IDLE Classic
+        name2=
+        # name2 set in user config-main.cfg for themes added after 2015 Oct 1
+
+        Item name2 is needed because setting name to a new builtin
+        causes older IDLEs to display multiple error messages or quit.
+        See https://bugs.python.org/issue25313.
+        When default = True, name2 takes precedence over name,
+        while older IDLEs will just use name.
+        """
+        default = self.GetOption('main', 'Theme', 'default',
+                                 type='bool', default=True)
+        if default:
+            theme = self.GetOption('main', 'Theme', 'name2', default='')
+        if default and not theme or not default:
+            theme = self.GetOption('main', 'Theme', 'name', default='')
+        source = self.defaultCfg if default else self.userCfg
+        if source['highlight'].has_section(theme):
+            return theme
+        else:
+            return "IDLE Classic"
 
     def CurrentKeys(self):
         "Return the name of the currently active key set."
@@ -671,6 +697,35 @@
                 self.GetExtraHelpSourceList('user') )
         return allHelpSources
 
+    def GetFont(self, root, configType, section):
+        """Retrieve a font from configuration (font, font-size, font-bold)
+        Intercept the special value 'TkFixedFont' and substitute
+        the actual font, factoring in some tweaks if needed for
+        appearance sakes.
+
+        The 'root' parameter can normally be any valid Tkinter widget.
+
+        Return a tuple (family, size, weight) suitable for passing
+        to tkinter.Font
+        """
+        family = self.GetOption(configType, section, 'font', default='courier')
+        size = self.GetOption(configType, section, 'font-size', type='int',
+                              default='10')
+        bold = self.GetOption(configType, section, 'font-bold', default=0,
+                              type='bool')
+        if (family == 'TkFixedFont'):
+            if TkVersion < 8.5:
+                family = 'Courier'
+            else:
+                f = Font(name='TkFixedFont', exists=True, root=root)
+                actualFont = Font.actual(f)
+                family = actualFont['family']
+                size = actualFont['size']
+                if size < 0:
+                    size = 10  # if font in pixels, ignore actual size
+                bold = actualFont['weight']=='bold'
+        return (family, size, 'bold' if bold else 'normal')
+
     def LoadCfgFiles(self):
         "Load all configuration files."
         for key in self.defaultCfg:
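
CurrentTheme above resolves the theme name with a name2-over-name precedence and a fallback to IDLE Classic. A simplified sketch of that precedence, ignoring the default/user config split:

def pick_theme(default, name, name2, highlight_sections):
    # With the default flag set, a non-empty name2 (new builtins such as
    # 'IDLE Dark') wins over name; otherwise the chosen name is used.
    theme = name2 if (default and name2) else name
    # Unknown themes fall back to the classic theme.
    return theme if theme in highlight_sections else 'IDLE Classic'

assert pick_theme(True, 'IDLE Classic', 'IDLE Dark',
                  {'IDLE Classic', 'IDLE Dark'}) == 'IDLE Dark'
assert pick_theme(False, 'Missing Theme', '', {'IDLE Classic'}) == 'IDLE Classic'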
diff --git a/lib/python2.7/idlelib/help.html b/lib/python2.7/idlelib/help.html
new file mode 100644
index 0000000..9e0eba0
--- /dev/null
+++ b/lib/python2.7/idlelib/help.html
@@ -0,0 +1,709 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+  <head>
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+
+    <title>24.6. IDLE &mdash; Python 2.7.10 documentation</title>
+
+    <link rel="stylesheet" href="../_static/default.css" type="text/css" />
+    <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
+
+    <script type="text/javascript">
+      var DOCUMENTATION_OPTIONS = {
+        URL_ROOT:    '../',
+        VERSION:     '2.7.10',
+        COLLAPSE_INDEX: false,
+        FILE_SUFFIX: '.html',
+        HAS_SOURCE:  true
+      };
+    </script>
+    <script type="text/javascript" src="../_static/jquery.js"></script>
+    <script type="text/javascript" src="../_static/underscore.js"></script>
+    <script type="text/javascript" src="../_static/doctools.js"></script>
+    <script type="text/javascript" src="../_static/sidebar.js"></script>
+    <link rel="search" type="application/opensearchdescription+xml"
+          title="Search within Python 2.7.10 documentation"
+          href="../_static/opensearch.xml"/>
+    <link rel="author" title="About these documents" href="../about.html" />
+    <link rel="copyright" title="Copyright" href="../copyright.html" />
+    <link rel="top" title="Python 2.7.10 documentation" href="../index.html" />
+    <link rel="up" title="24. Graphical User Interfaces with Tk" href="tk.html" />
+    <link rel="next" title="24.7. Other Graphical User Interface Packages" href="othergui.html" />
+    <link rel="prev" title="24.5. turtle — Turtle graphics for Tk" href="turtle.html" />
+    <link rel="shortcut icon" type="image/png" href="../_static/py.png" />
+    <script type="text/javascript" src="../_static/copybutton.js"></script>
+
+
+
+
+  </head>
+  <body>
+    <div class="related">
+      <h3>Navigation</h3>
+      <ul>
+        <li class="right" style="margin-right: 10px">
+          <a href="../genindex.html" title="General Index"
+             accesskey="I">index</a></li>
+        <li class="right" >
+          <a href="../py-modindex.html" title="Python Module Index"
+             >modules</a> |</li>
+        <li class="right" >
+          <a href="othergui.html" title="24.7. Other Graphical User Interface Packages"
+             accesskey="N">next</a> |</li>
+        <li class="right" >
+          <a href="turtle.html" title="24.5. turtle — Turtle graphics for Tk"
+             accesskey="P">previous</a> |</li>
+        <li><img src="../_static/py.png" alt=""
+                 style="vertical-align: middle; margin-top: -1px"/></li>
+        <li><a href="https://www.python.org/">Python</a> &raquo;</li>
+        <li>
+          <a href="../index.html">Python 2.7.10 documentation</a> &raquo;
+        </li>
+
+          <li><a href="index.html" >The Python Standard Library</a> &raquo;</li>
+          <li><a href="tk.html" accesskey="U">24. Graphical User Interfaces with Tk</a> &raquo;</li>
+      </ul>
+    </div>
+
+    <div class="document">
+      <div class="documentwrapper">
+        <div class="bodywrapper">
+          <div class="body">
+
+  <div class="section" id="idle">
+<span id="id1"></span><h1>24.6. IDLE<a class="headerlink" href="#idle" title="Permalink to this headline">¶</a></h1>
+<p id="index-0">IDLE is Python&#8217;s Integrated Development and Learning Environment.</p>
+<p>IDLE has the following features:</p>
+<ul class="simple">
+<li>coded in 100% pure Python, using the <tt class="xref py py-mod docutils literal"><span class="pre">tkinter</span></tt> GUI toolkit</li>
+<li>cross-platform: works mostly the same on Windows, Unix, and Mac OS X</li>
+<li>Python shell window (interactive interpreter) with colorizing
+of code input, output, and error messages</li>
+<li>multi-window text editor with multiple undo, Python colorizing,
+smart indent, call tips, auto completion, and other features</li>
+<li>search within any window, replace within editor windows, and search
+through multiple files (grep)</li>
+<li>debugger with persistent breakpoints, stepping, and viewing
+of global and local namespaces</li>
+<li>configuration, browsers, and other dialogs</li>
+</ul>
+<div class="section" id="menus">
+<h2>24.6.1. Menus<a class="headerlink" href="#menus" title="Permalink to this headline">¶</a></h2>
+<p>IDLE has two main window types, the Shell window and the Editor window.  It is
+possible to have multiple editor windows simultaneously.  Output windows, such
+as used for Edit / Find in Files, are a subtype of edit window.  They currently
+have the same top menu as Editor windows but a different default title and
+context menu.</p>
+<p>IDLE&#8217;s menus dynamically change based on which window is currently selected.
+Each menu documented below indicates which window type it is associated with.</p>
+<div class="section" id="file-menu-shell-and-editor">
+<h3>24.6.1.1. File menu (Shell and Editor)<a class="headerlink" href="#file-menu-shell-and-editor" title="Permalink to this headline">¶</a></h3>
+<dl class="docutils">
+<dt>New File</dt>
+<dd>Create a new file editing window.</dd>
+<dt>Open...</dt>
+<dd>Open an existing file with an Open dialog.</dd>
+<dt>Recent Files</dt>
+<dd>Open a list of recent files.  Click one to open it.</dd>
+<dt>Open Module...</dt>
+<dd>Open an existing module (searches sys.path).</dd>
+</dl>
+<dl class="docutils" id="index-1">
+<dt>Class Browser</dt>
+<dd>Show functions, classes, and methods in the current Editor file in a
+tree structure.  In the shell, open a module first.</dd>
+<dt>Path Browser</dt>
+<dd>Show sys.path directories, modules, functions, classes and methods in a
+tree structure.</dd>
+<dt>Save</dt>
+<dd>Save the current window to the associated file, if there is one.  Windows
+that have been changed since being opened or last saved have a * before
+and after the window title.  If there is no associated file,
+do Save As instead.</dd>
+<dt>Save As...</dt>
+<dd>Save the current window with a Save As dialog.  The file saved becomes the
+new associated file for the window.</dd>
+<dt>Save Copy As...</dt>
+<dd>Save the current window to different file without changing the associated
+file.</dd>
+<dt>Print Window</dt>
+<dd>Print the current window to the default printer.</dd>
+<dt>Close</dt>
+<dd>Close the current window (ask to save if unsaved).</dd>
+<dt>Exit</dt>
+<dd>Close all windows and quit IDLE (ask to save unsaved windows).</dd>
+</dl>
+</div>
+<div class="section" id="edit-menu-shell-and-editor">
+<h3>24.6.1.2. Edit menu (Shell and Editor)<a class="headerlink" href="#edit-menu-shell-and-editor" title="Permalink to this headline">¶</a></h3>
+<dl class="docutils">
+<dt>Undo</dt>
+<dd>Undo the last change to the current window.  A maximum of 1000 changes may
+be undone.</dd>
+<dt>Redo</dt>
+<dd>Redo the last undone change to the current window.</dd>
+<dt>Cut</dt>
+<dd>Copy selection into the system-wide clipboard; then delete the selection.</dd>
+<dt>Copy</dt>
+<dd>Copy selection into the system-wide clipboard.</dd>
+<dt>Paste</dt>
+<dd>Insert contents of the system-wide clipboard into the current window.</dd>
+</dl>
+<p>The clipboard functions are also available in context menus.</p>
+<dl class="docutils">
+<dt>Select All</dt>
+<dd>Select the entire contents of the current window.</dd>
+<dt>Find...</dt>
+<dd>Open a search dialog with many options</dd>
+<dt>Find Again</dt>
+<dd>Repeat the last search, if there is one.</dd>
+<dt>Find Selection</dt>
+<dd>Search for the currently selected string, if there is one.</dd>
+<dt>Find in Files...</dt>
+<dd>Open a file search dialog.  Put results in a new output window.</dd>
+<dt>Replace...</dt>
+<dd>Open a search-and-replace dialog.</dd>
+<dt>Go to Line</dt>
+<dd>Move cursor to the line number requested and make that line visible.</dd>
+<dt>Show Completions</dt>
+<dd>Open a scrollable list allowing selection of keywords and attributes. See
+Completions in the Tips sections below.</dd>
+<dt>Expand Word</dt>
+<dd>Expand a prefix you have typed to match a full word in the same window;
+repeat to get a different expansion.</dd>
+<dt>Show call tip</dt>
+<dd>After an unclosed parenthesis for a function, open a small window with
+function parameter hints.</dd>
+<dt>Show surrounding parens</dt>
+<dd>Highlight the surrounding parenthesis.</dd>
+</dl>
+</div>
+<div class="section" id="format-menu-editor-window-only">
+<h3>24.6.1.3. Format menu (Editor window only)<a class="headerlink" href="#format-menu-editor-window-only" title="Permalink to this headline">¶</a></h3>
+<dl class="docutils">
+<dt>Indent Region</dt>
+<dd>Shift selected lines right by the indent width (default 4 spaces).</dd>
+<dt>Dedent Region</dt>
+<dd>Shift selected lines left by the indent width (default 4 spaces).</dd>
+<dt>Comment Out Region</dt>
+<dd>Insert ## in front of selected lines.</dd>
+<dt>Uncomment Region</dt>
+<dd>Remove leading # or ## from selected lines.</dd>
+<dt>Tabify Region</dt>
+<dd>Turn <em>leading</em> stretches of spaces into tabs. (Note: We recommend using
+4 space blocks to indent Python code.)</dd>
+<dt>Untabify Region</dt>
+<dd>Turn <em>all</em> tabs into the correct number of spaces.</dd>
+<dt>Toggle Tabs</dt>
+<dd>Open a dialog to switch between indenting with spaces and tabs.</dd>
+<dt>New Indent Width</dt>
+<dd>Open a dialog to change indent width. The accepted default by the Python
+community is 4 spaces.</dd>
+<dt>Format Paragraph</dt>
+<dd>Reformat the current blank-line-delimited paragraph in comment block or
+multiline string or selected line in a string.  All lines in the
+paragraph will be formatted to less than N columns, where N defaults to 72.</dd>
+<dt>Strip trailing whitespace</dt>
+<dd>Remove any space characters after the last non-space character of a line.</dd>
+</dl>
+</div>
+<div class="section" id="run-menu-editor-window-only">
+<span id="index-2"></span><h3>24.6.1.4. Run menu (Editor window only)<a class="headerlink" href="#run-menu-editor-window-only" title="Permalink to this headline">¶</a></h3>
+<dl class="docutils">
+<dt>Python Shell</dt>
+<dd>Open or wake up the Python Shell window.</dd>
+<dt>Check Module</dt>
+<dd>Check the syntax of the module currently open in the Editor window. If the
+module has not been saved IDLE will either prompt the user to save or
+autosave, as selected in the General tab of the Idle Settings dialog.  If
+there is a syntax error, the approximate location is indicated in the
+Editor window.</dd>
+<dt>Run Module</dt>
+<dd>Do Check Module (above).  If no error, restart the shell to clean the
+environment, then execute the module.  Output is displayed in the Shell
+window.  Note that output requires use of <tt class="docutils literal"><span class="pre">print</span></tt> or <tt class="docutils literal"><span class="pre">write</span></tt>.
+When execution is complete, the Shell retains focus and displays a prompt.
+At this point, one may interactively explore the result of execution.
+This is similar to executing a file with <tt class="docutils literal"><span class="pre">python</span> <span class="pre">-i</span> <span class="pre">file</span></tt> at a command
+line.</dd>
+</dl>
+</div>
+<div class="section" id="shell-menu-shell-window-only">
+<h3>24.6.1.5. Shell menu (Shell window only)<a class="headerlink" href="#shell-menu-shell-window-only" title="Permalink to this headline">¶</a></h3>
+<dl class="docutils">
+<dt>View Last Restart</dt>
+<dd>Scroll the shell window to the last Shell restart.</dd>
+<dt>Restart Shell</dt>
+<dd>Restart the shell to clean the environment.</dd>
+</dl>
+</div>
+<div class="section" id="debug-menu-shell-window-only">
+<h3>24.6.1.6. Debug menu (Shell window only)<a class="headerlink" href="#debug-menu-shell-window-only" title="Permalink to this headline">¶</a></h3>
+<dl class="docutils">
+<dt>Go to File/Line</dt>
+<dd>Look on the current line, with the cursor, and the line above for a filename
+and line number.  If found, open the file if not already open, and show the
+line.  Use this to view source lines referenced in an exception traceback
+and lines found by Find in Files. Also available in the context menu of
+the Shell window and Output windows.</dd>
+</dl>
+<dl class="docutils" id="index-3">
+<dt>Debugger (toggle)</dt>
+<dd>When activated, code entered in the Shell or run from an Editor will run
+under the debugger.  In the Editor, breakpoints can be set with the context
+menu.  This feature is still incomplete and somewhat experimental.</dd>
+<dt>Stack Viewer</dt>
+<dd>Show the stack traceback of the last exception in a tree widget, with
+access to locals and globals.</dd>
+<dt>Auto-open Stack Viewer</dt>
+<dd>Toggle automatically opening the stack viewer on an unhandled exception.</dd>
+</dl>
+</div>
+<div class="section" id="options-menu-shell-and-editor">
+<h3>24.6.1.7. Options menu (Shell and Editor)<a class="headerlink" href="#options-menu-shell-and-editor" title="Permalink to this headline">¶</a></h3>
+<dl class="docutils">
+<dt>Configure IDLE</dt>
+<dd><p class="first">Open a configuration dialog and change preferences for the following:
+fonts, indentation, keybindings, text color themes, startup windows and
+size, additional help sources, and extensions (see below).  On OS X,
+open the configuration dialog by selecting Preferences in the application
+menu.  To use a new built-in color theme (IDLE Dark) with older IDLEs,
+save it as a new custom theme.</p>
+<p class="last">Non-default user settings are saved in a .idlerc directory in the user&#8217;s
+home directory.  Problems caused by bad user configuration files are solved
+by editing or deleting one or more of the files in .idlerc.</p>
+</dd>
+<dt>Code Context (toggle)(Editor Window only)</dt>
+<dd>Open a pane at the top of the edit window which shows the block context
+of the code which has scrolled above the top of the window.</dd>
+</dl>
+</div>
+<div class="section" id="window-menu-shell-and-editor">
+<h3>24.6.1.8. Window menu (Shell and Editor)<a class="headerlink" href="#window-menu-shell-and-editor" title="Permalink to this headline">¶</a></h3>
+<dl class="docutils">
+<dt>Zoom Height</dt>
+<dd>Toggles the window between normal size and maximum height. The initial size
+defaults to 40 lines by 80 chars unless changed on the General tab of the
+Configure IDLE dialog.</dd>
+</dl>
+<p>The rest of this menu lists the names of all open windows; select one to bring
+it to the foreground (deiconifying it if necessary).</p>
+</div>
+<div class="section" id="help-menu-shell-and-editor">
+<h3>24.6.1.9. Help menu (Shell and Editor)<a class="headerlink" href="#help-menu-shell-and-editor" title="Permalink to this headline">¶</a></h3>
+<dl class="docutils">
+<dt>About IDLE</dt>
+<dd>Display version, copyright, license, credits, and more.</dd>
+<dt>IDLE Help</dt>
+<dd>Display a help file for IDLE detailing the menu options, basic editing and
+navigation, and other tips.</dd>
+<dt>Python Docs</dt>
+<dd>Access local Python documentation, if installed, or start a web browser
+and open docs.python.org showing the latest Python documentation.</dd>
+<dt>Turtle Demo</dt>
+<dd>Run the turtledemo module with example python code and turtle drawings.</dd>
+</dl>
+<p>Additional help sources may be added here with the Configure IDLE dialog under
+the General tab.</p>
+</div>
+<div class="section" id="context-menus">
+<span id="index-4"></span><h3>24.6.1.10. Context Menus<a class="headerlink" href="#context-menus" title="Permalink to this headline">¶</a></h3>
+<p>Open a context menu by right-clicking in a window (Control-click on OS X).
+Context menus have the standard clipboard functions also on the Edit menu.</p>
+<dl class="docutils">
+<dt>Cut</dt>
+<dd>Copy selection into the system-wide clipboard; then delete the selection.</dd>
+<dt>Copy</dt>
+<dd>Copy selection into the system-wide clipboard.</dd>
+<dt>Paste</dt>
+<dd>Insert contents of the system-wide clipboard into the current window.</dd>
+</dl>
+<p>Editor windows also have breakpoint functions.  Lines with a breakpoint set are
+specially marked.  Breakpoints only have an effect when running under the
+debugger.  Breakpoints for a file are saved in the user&#8217;s .idlerc directory.</p>
+<dl class="docutils">
+<dt>Set Breakpoint</dt>
+<dd>Set a breakpoint on the current line.</dd>
+<dt>Clear Breakpoint</dt>
+<dd>Clear the breakpoint on that line.</dd>
+</dl>
+<p>Shell and Output windows have the following.</p>
+<dl class="docutils">
+<dt>Go to file/line</dt>
+<dd>Same as in Debug menu.</dd>
+</dl>
+</div>
+</div>
+<div class="section" id="editing-and-navigation">
+<h2>24.6.2. Editing and navigation<a class="headerlink" href="#editing-and-navigation" title="Permalink to this headline">¶</a></h2>
+<p>In this section, &#8216;C&#8217; refers to the <tt class="kbd docutils literal"><span class="pre">Control</span></tt> key on Windows and Unix and
+the <tt class="kbd docutils literal"><span class="pre">Command</span></tt> key on Mac OSX.</p>
+<ul>
+<li><p class="first"><tt class="kbd docutils literal"><span class="pre">Backspace</span></tt> deletes to the left; <tt class="kbd docutils literal"><span class="pre">Del</span></tt> deletes to the right</p>
+</li>
+<li><p class="first"><tt class="kbd docutils literal"><span class="pre">C-Backspace</span></tt> delete word left; <tt class="kbd docutils literal"><span class="pre">C-Del</span></tt> delete word to the right</p>
+</li>
+<li><p class="first">Arrow keys and <tt class="kbd docutils literal"><span class="pre">Page</span> <span class="pre">Up</span></tt>/<tt class="kbd docutils literal"><span class="pre">Page</span> <span class="pre">Down</span></tt> to move around</p>
+</li>
+<li><p class="first"><tt class="kbd docutils literal"><span class="pre">C-LeftArrow</span></tt> and <tt class="kbd docutils literal"><span class="pre">C-RightArrow</span></tt> moves by words</p>
+</li>
+<li><p class="first"><tt class="kbd docutils literal"><span class="pre">Home</span></tt>/<tt class="kbd docutils literal"><span class="pre">End</span></tt> go to begin/end of line</p>
+</li>
+<li><p class="first"><tt class="kbd docutils literal"><span class="pre">C-Home</span></tt>/<tt class="kbd docutils literal"><span class="pre">C-End</span></tt> go to begin/end of file</p>
+</li>
+<li><p class="first">Some useful Emacs bindings are inherited from Tcl/Tk:</p>
+<blockquote>
+<div><ul class="simple">
+<li><tt class="kbd docutils literal"><span class="pre">C-a</span></tt> beginning of line</li>
+<li><tt class="kbd docutils literal"><span class="pre">C-e</span></tt> end of line</li>
+<li><tt class="kbd docutils literal"><span class="pre">C-k</span></tt> kill line (but doesn&#8217;t put it in clipboard)</li>
+<li><tt class="kbd docutils literal"><span class="pre">C-l</span></tt> center window around the insertion point</li>
+<li><tt class="kbd docutils literal"><span class="pre">C-b</span></tt> go backwards one character without deleting (usually you can
+also use the cursor key for this)</li>
+<li><tt class="kbd docutils literal"><span class="pre">C-f</span></tt> go forward one character without deleting (usually you can
+also use the cursor key for this)</li>
+<li><tt class="kbd docutils literal"><span class="pre">C-p</span></tt> go up one line (usually you can also use the cursor key for
+this)</li>
+<li><tt class="kbd docutils literal"><span class="pre">C-d</span></tt> delete next character</li>
+</ul>
+</div></blockquote>
+</li>
+</ul>
+<p>Standard keybindings (like <tt class="kbd docutils literal"><span class="pre">C-c</span></tt> to copy and <tt class="kbd docutils literal"><span class="pre">C-v</span></tt> to paste)
+may work.  Keybindings are selected in the Configure IDLE dialog.</p>
+<div class="section" id="automatic-indentation">
+<h3>24.6.2.1. Automatic indentation<a class="headerlink" href="#automatic-indentation" title="Permalink to this headline">¶</a></h3>
+<p>After a block-opening statement, the next line is indented by 4 spaces (in the
+Python Shell window by one tab).  After certain keywords (break, return etc.)
+the next line is dedented.  In leading indentation, <tt class="kbd docutils literal"><span class="pre">Backspace</span></tt> deletes up
+to 4 spaces if they are there. <tt class="kbd docutils literal"><span class="pre">Tab</span></tt> inserts spaces (in the Python
+Shell window one tab), number depends on Indent width. Currently tabs
+are restricted to four spaces due to Tcl/Tk limitations.</p>
+<p>See also the indent/dedent region commands in the edit menu.</p>
+</div>
+<div class="section" id="completions">
+<h3>24.6.2.2. Completions<a class="headerlink" href="#completions" title="Permalink to this headline">¶</a></h3>
+<p>Completions are supplied for functions, classes, and attributes of classes,
+both built-in and user-defined. Completions are also provided for
+filenames.</p>
+<p>The AutoCompleteWindow (ACW) will open after a predefined delay (default is
+two seconds) after a &#8216;.&#8217; or (in a string) an os.sep is typed. If after one
+of those characters (plus zero or more other characters) a tab is typed
+the ACW will open immediately if a possible continuation is found.</p>
+<p>If there is only one possible completion for the characters entered, a
+<tt class="kbd docutils literal"><span class="pre">Tab</span></tt> will supply that completion without opening the ACW.</p>
+<p>&#8216;Show Completions&#8217; will force open a completions window; by default,
+<tt class="kbd docutils literal"><span class="pre">C-space</span></tt> will open a completions window. In an empty
+string, this will contain the files in the current directory. On a
+blank line, it will contain the built-in and user-defined functions and
+classes in the current namespaces, plus any modules imported. If some
+characters have been entered, the ACW will attempt to be more specific.</p>
+<p>If a string of characters is typed, the ACW selection will jump to the
+entry most closely matching those characters.  Entering a <tt class="kbd docutils literal"><span class="pre">tab</span></tt> will
+cause the longest non-ambiguous match to be entered in the Editor window or
+Shell.  Two <tt class="kbd docutils literal"><span class="pre">tab</span></tt> presses in a row will supply the current ACW selection, as
+will return or a double click.  Cursor keys, Page Up/Down, mouse selection,
+and the scroll wheel all operate on the ACW.</p>
+<p>&#8220;Hidden&#8221; attributes can be accessed by typing the beginning of a hidden
+name after a &#8216;.&#8217;, e.g. &#8216;_&#8217;. This allows access to modules with
+<tt class="docutils literal"><span class="pre">__all__</span></tt> set, or to class-private attributes.</p>
+<p>Completions and the &#8216;Expand Word&#8217; facility can save a lot of typing!</p>
+<p>Completions are currently limited to those in the namespaces. Names in
+an Editor window which are not accessible via <tt class="docutils literal"><span class="pre">__main__</span></tt> or <a class="reference internal" href="sys.html#sys.modules" title="sys.modules"><tt class="xref py py-data docutils literal"><span class="pre">sys.modules</span></tt></a> will
+not be found.  Run the module once with your imports to correct this situation.
+Note that IDLE itself places quite a few modules in sys.modules, so
+much can be found by default, e.g. the re module.</p>
+<p>If you don&#8217;t like the ACW popping up unbidden, simply make the delay
+longer or disable the extension.</p>
+</div>
+<div class="section" id="calltips">
+<h3>24.6.2.3. Calltips<a class="headerlink" href="#calltips" title="Permalink to this headline">¶</a></h3>
+<p>A calltip is shown when one types <tt class="kbd docutils literal"><span class="pre">(</span></tt> after the name of an <em>accessible</em>
+function.  A name expression may include dots and subscripts.  A calltip
+remains until it is clicked, the cursor is moved out of the argument area,
+or <tt class="kbd docutils literal"><span class="pre">)</span></tt> is typed.  When the cursor is in the argument part of a definition,
+the menu or shortcut displays a calltip.</p>
+<p>A calltip consists of the function signature and the first line of the
+docstring.  For builtins without an accessible signature, the calltip
+consists of all lines up to the fifth line or the first blank line.  These
+details may change.</p>
+<p>The set of <em>accessible</em> functions depends on what modules have been imported
+into the user process, including those imported by Idle itself,
+and what definitions have been run, all since the last restart.</p>
+<p>For example, restart the Shell and enter <tt class="docutils literal"><span class="pre">itertools.count(</span></tt>.  A calltip
+appears because Idle imports itertools into the user process for its own use.
+(This could change.)  Enter <tt class="docutils literal"><span class="pre">turtle.write(</span></tt> and nothing appears.  Idle does
+not import turtle.  The menu or shortcut does nothing either.  Enter
+<tt class="docutils literal"><span class="pre">import</span> <span class="pre">turtle</span></tt> and then <tt class="docutils literal"><span class="pre">turtle.write(</span></tt> will work.</p>
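+<p>A minimal Shell transcript of the sequence just described (the calltip text
+itself varies with the Python version, so it is not reproduced here):</p>
+<div class="highlight-python"><div class="highlight"><pre>&gt;&gt;&gt; itertools.count(    # calltip appears; Idle already imported itertools
+&gt;&gt;&gt; turtle.write(       # nothing appears; turtle is not yet imported
+&gt;&gt;&gt; import turtle
+&gt;&gt;&gt; turtle.write(       # now a calltip appears
+</pre></div>
+</div>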
+<p>In an editor, import statements have no effect until one runs the file.  One
+might want to run a file after writing the import statements at the top,
+or immediately run an existing file before editing.</p>
+</div>
+<div class="section" id="python-shell-window">
+<h3>24.6.2.4. Python Shell window<a class="headerlink" href="#python-shell-window" title="Permalink to this headline">¶</a></h3>
+<ul>
+<li><p class="first"><tt class="kbd docutils literal"><span class="pre">C-c</span></tt> interrupts executing command</p>
+</li>
+<li><p class="first"><tt class="kbd docutils literal"><span class="pre">C-d</span></tt> sends end-of-file; closes window if typed at a <tt class="docutils literal"><span class="pre">&gt;&gt;&gt;</span></tt> prompt</p>
+</li>
+<li><p class="first"><tt class="kbd docutils literal"><span class="pre">Alt-/</span></tt> (Expand word) is also useful to reduce typing</p>
+<p>Command history</p>
+<ul class="simple">
+<li><tt class="kbd docutils literal"><span class="pre">Alt-p</span></tt> retrieves previous command matching what you have typed. On
+OS X use <tt class="kbd docutils literal"><span class="pre">C-p</span></tt>.</li>
+<li><tt class="kbd docutils literal"><span class="pre">Alt-n</span></tt> retrieves next. On OS X use <tt class="kbd docutils literal"><span class="pre">C-n</span></tt>.</li>
+<li><tt class="kbd docutils literal"><span class="pre">Return</span></tt> while on any previous command retrieves that command</li>
+</ul>
+</li>
+</ul>
+</div>
+<div class="section" id="text-colors">
+<h3>24.6.2.5. Text colors<a class="headerlink" href="#text-colors" title="Permalink to this headline">¶</a></h3>
+<p>Idle defaults to black on white text, but colors text with special meanings.
+For the shell, these are shell output, shell error, user output, and
+user error.  For Python code, at the shell prompt or in an editor, these are
+keywords, builtin class and function names, names following <tt class="docutils literal"><span class="pre">class</span></tt> and
+<tt class="docutils literal"><span class="pre">def</span></tt>, strings, and comments. For any text window, these are the cursor (when
+present), found text (when possible), and selected text.</p>
+<p>Text coloring is done in the background, so uncolorized text is occasionally
+visible.  To change the color scheme, use the Configure IDLE dialog
+Highlighting tab.  The marking of debugger breakpoint lines in the editor and
+text in popups and dialogs is not user-configurable.</p>
+</div>
+</div>
+<div class="section" id="startup-and-code-execution">
+<h2>24.6.3. Startup and code execution<a class="headerlink" href="#startup-and-code-execution" title="Permalink to this headline">¶</a></h2>
+<p>Upon startup with the <tt class="docutils literal"><span class="pre">-s</span></tt> option, IDLE will execute the file referenced by
+the environment variables <span class="target" id="index-5"></span><tt class="xref std std-envvar docutils literal"><span class="pre">IDLESTARTUP</span></tt> or <span class="target" id="index-6"></span><a class="reference internal" href="../using/cmdline.html#envvar-PYTHONSTARTUP"><tt class="xref std std-envvar docutils literal"><span class="pre">PYTHONSTARTUP</span></tt></a>.
+IDLE first checks for <tt class="docutils literal"><span class="pre">IDLESTARTUP</span></tt>; if <tt class="docutils literal"><span class="pre">IDLESTARTUP</span></tt> is present the file
+referenced is run.  If <tt class="docutils literal"><span class="pre">IDLESTARTUP</span></tt> is not present, IDLE checks for
+<tt class="docutils literal"><span class="pre">PYTHONSTARTUP</span></tt>.  Files referenced by these environment variables are
+convenient places to store functions that are used frequently from the IDLE
+shell, or for executing import statements to import common modules.</p>
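+<p>For illustration, a startup file referenced by <tt class="docutils literal"><span class="pre">PYTHONSTARTUP</span></tt> might look
+like the sketch below (the file name and its contents are only an example, not
+something IDLE requires):</p>
+<div class="highlight-python"><div class="highlight"><pre># ~/.pythonstartup (point PYTHONSTARTUP or IDLESTARTUP at this file)
+import os, re, sys
+
+def cls():
+    # small helper then available in every IDLE shell session
+    print('\n' * 40)
+</pre></div>
+</div>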
+<p>In addition, <tt class="docutils literal"><span class="pre">Tk</span></tt> also loads a startup file if it is present.  Note that the
+Tk file is loaded unconditionally.  This additional file is <tt class="docutils literal"><span class="pre">.Idle.py</span></tt> and is
+looked for in the user&#8217;s home directory.  Statements in this file will be
+executed in the Tk namespace, so this file is not useful for importing
+functions to be used from IDLE&#8217;s Python shell.</p>
+<div class="section" id="command-line-usage">
+<h3>24.6.3.1. Command line usage<a class="headerlink" href="#command-line-usage" title="Permalink to this headline">¶</a></h3>
+<div class="highlight-python"><div class="highlight"><pre>idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
+
+-c command  run command in the shell window
+-d          enable debugger and open shell window
+-e          open editor window
+-h          print help message with legal combinations and exit
+-i          open shell window
+-r file     run file in shell window
+-s          run $IDLESTARTUP or $PYTHONSTARTUP first, in shell window
+-t title    set title of shell window
+-           run stdin in shell (- must be last option before args)
+</pre></div>
+</div>
+<p>If there are arguments:</p>
+<ul class="simple">
+<li>If <tt class="docutils literal"><span class="pre">-</span></tt>, <tt class="docutils literal"><span class="pre">-c</span></tt>, or <tt class="docutils literal"><span class="pre">-r</span></tt> is used, all arguments are placed in
+<tt class="docutils literal"><span class="pre">sys.argv[1:...]</span></tt> and <tt class="docutils literal"><span class="pre">sys.argv[0]</span></tt> is set to <tt class="docutils literal"><span class="pre">''</span></tt>, <tt class="docutils literal"><span class="pre">'-c'</span></tt>,
+or <tt class="docutils literal"><span class="pre">'-r'</span></tt>, as illustrated after this list.  No editor window is opened, even if
+that is the default set in the Options dialog.</li>
+<li>Otherwise, arguments are files opened for editing and
+<tt class="docutils literal"><span class="pre">sys.argv</span></tt> reflects the arguments passed to IDLE itself.</li>
+</ul>
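+<p>For example, following the description above (the command and arguments are
+only illustrative), starting IDLE with <tt class="docutils literal"><span class="pre">idle.py -c "print('hi')" one two</span></tt>
+leaves the shell with:</p>
+<div class="highlight-python"><div class="highlight"><pre>&gt;&gt;&gt; import sys
+&gt;&gt;&gt; sys.argv
+['-c', 'one', 'two']
+</pre></div>
+</div>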
+</div>
+<div class="section" id="idle-console-differences">
+<h3>24.6.3.2. IDLE-console differences<a class="headerlink" href="#idle-console-differences" title="Permalink to this headline">¶</a></h3>
+<p>As much as possible, the result of executing Python code with IDLE is the
+same as executing the same code in a console window.  However, the different
+interface and operation occasionally affects results.</p>
+<p>For instance, IDLE normally executes user code in a separate process from
+the IDLE GUI itself.  The IDLE versions of sys.stdin, .stdout, and .stderr in the
+execution process get input from and send output to the GUI process,
+which keeps control of the keyboard and screen.  This is normally transparent,
+but code that accesses these objects will see different attribute values.
+Also, functions that directly access the keyboard and screen will not work.</p>
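+<p>A quick way to see this difference (illustrative only; the exact class name is
+version-dependent and therefore not shown):</p>
+<div class="highlight-python"><div class="highlight"><pre>&gt;&gt;&gt; import sys
+&gt;&gt;&gt; type(sys.stdin)   # an IDLE proxy class, not the console's file object
+</pre></div>
+</div>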
+<p>With IDLE&#8217;s Shell, one enters, edits, and recalls complete statements.
+Some consoles only work with a single physical line at a time.</p>
+</div>
+<div class="section" id="running-without-a-subprocess">
+<h3>24.6.3.3. Running without a subprocess<a class="headerlink" href="#running-without-a-subprocess" title="Permalink to this headline">¶</a></h3>
+<p>By default, IDLE executes user code in a separate subprocess via a socket,
+which uses the internal loopback interface.  This connection is not
+externally visible and no data is sent to or received from the Internet.
+If firewall software complains anyway, you can ignore it.</p>
+<p>If the attempt to make the socket connection fails, Idle will notify you.
+Such failures are sometimes transient, but if persistent, the problem
+may be either a firewall blocking the connection or misconfiguration of
+a particular system.  Until the problem is fixed, one can run Idle with
+the -n command line switch.</p>
+<p>If IDLE is started with the -n command line switch it will run in a
+single process and will not create the subprocess which runs the RPC
+Python execution server.  This can be useful if Python cannot create
+the subprocess or the RPC socket interface on your platform.  However,
+in this mode user code is not isolated from IDLE itself.  Also, the
+environment is not restarted when Run/Run Module (F5) is selected.  If
+your code has been modified, you must reload() the affected modules and
+re-import any specific items (e.g. from foo import baz) if the changes
+are to take effect.  For these reasons, it is preferable to run IDLE
+with the default subprocess if at all possible.</p>
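+<p>A sketch of the manual refresh described above when running with <tt class="docutils literal"><span class="pre">-n</span></tt>
+(the module name <tt class="docutils literal"><span class="pre">foo</span></tt> is only illustrative):</p>
+<div class="highlight-python"><div class="highlight"><pre>&gt;&gt;&gt; import foo
+&gt;&gt;&gt; # ... edit foo.py; with -n, F5 does not give a fresh environment ...
+&gt;&gt;&gt; reload(foo)            # pick up the edited module
+&gt;&gt;&gt; from foo import baz    # re-import any names imported with 'from'
+</pre></div>
+</div>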
+<div class="deprecated">
+<p><span class="versionmodified">Deprecated since version 3.4.</span></p>
+</div>
+</div>
+</div>
+<div class="section" id="help-and-preferences">
+<h2>24.6.4. Help and preferences<a class="headerlink" href="#help-and-preferences" title="Permalink to this headline">¶</a></h2>
+<div class="section" id="additional-help-sources">
+<h3>24.6.4.1. Additional help sources<a class="headerlink" href="#additional-help-sources" title="Permalink to this headline">¶</a></h3>
+<p>IDLE includes a help menu entry called &#8220;Python Docs&#8221; that will open the
+extensive sources of help, including tutorials, available at docs.python.org.
+Selected URLs can be added or removed from the help menu at any time using the
+Configure IDLE dialog. See the IDLE help option in the help menu of IDLE for
+more information.</p>
+</div>
+<div class="section" id="setting-preferences">
+<h3>24.6.4.2. Setting preferences<a class="headerlink" href="#setting-preferences" title="Permalink to this headline">¶</a></h3>
+<p>The font preferences, highlighting, keys, and general preferences can be
+changed via Configure IDLE on the Options menu.  Keys can be user defined;
+IDLE ships with four built-in key sets. In addition, a user can create a
+custom key set in the Configure IDLE dialog under the Keys tab.</p>
+</div>
+<div class="section" id="extensions">
+<h3>24.6.4.3. Extensions<a class="headerlink" href="#extensions" title="Permalink to this headline">¶</a></h3>
+<p>IDLE contains an extension facility.  Preferences for extensions can be
+changed with Configure Extensions. See the beginning of config-extensions.def
+in the idlelib directory for further information.  The default extensions
+are currently:</p>
+<ul class="simple">
+<li>FormatParagraph</li>
+<li>AutoExpand</li>
+<li>ZoomHeight</li>
+<li>ScriptBinding</li>
+<li>CallTips</li>
+<li>ParenMatch</li>
+<li>AutoComplete</li>
+<li>CodeContext</li>
+<li>RstripExtension</li>
+</ul>
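+<p>As an illustration (the user override file <tt class="docutils literal"><span class="pre">~/.idlerc/config-extensions.cfg</span></tt>
+is an assumption here; check config-extensions.def for the authoritative option
+names), an extension such as AutoComplete could be disabled with an entry like:</p>
+<div class="highlight-python"><div class="highlight"><pre>[AutoComplete]
+enable = False
+</pre></div>
+</div>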
+</div>
+</div>
+</div>
+
+
+          </div>
+        </div>
+      </div>
+      <div class="sphinxsidebar">
+        <div class="sphinxsidebarwrapper">
+  <h3><a href="../contents.html">Table Of Contents</a></h3>
+  <ul>
+<li><a class="reference internal" href="#">24.6. IDLE</a><ul>
+<li><a class="reference internal" href="#menus">24.6.1. Menus</a><ul>
+<li><a class="reference internal" href="#file-menu-shell-and-editor">24.6.1.1. File menu (Shell and Editor)</a></li>
+<li><a class="reference internal" href="#edit-menu-shell-and-editor">24.6.1.2. Edit menu (Shell and Editor)</a></li>
+<li><a class="reference internal" href="#format-menu-editor-window-only">24.6.1.3. Format menu (Editor window only)</a></li>
+<li><a class="reference internal" href="#run-menu-editor-window-only">24.6.1.4. Run menu (Editor window only)</a></li>
+<li><a class="reference internal" href="#shell-menu-shell-window-only">24.6.1.5. Shell menu (Shell window only)</a></li>
+<li><a class="reference internal" href="#debug-menu-shell-window-only">24.6.1.6. Debug menu (Shell window only)</a></li>
+<li><a class="reference internal" href="#options-menu-shell-and-editor">24.6.1.7. Options menu (Shell and Editor)</a></li>
+<li><a class="reference internal" href="#window-menu-shell-and-editor">24.6.1.8. Window menu (Shell and Editor)</a></li>
+<li><a class="reference internal" href="#help-menu-shell-and-editor">24.6.1.9. Help menu (Shell and Editor)</a></li>
+<li><a class="reference internal" href="#context-menus">24.6.1.10. Context Menus</a></li>
+</ul>
+</li>
+<li><a class="reference internal" href="#editing-and-navigation">24.6.2. Editing and navigation</a><ul>
+<li><a class="reference internal" href="#automatic-indentation">24.6.2.1. Automatic indentation</a></li>
+<li><a class="reference internal" href="#completions">24.6.2.2. Completions</a></li>
+<li><a class="reference internal" href="#calltips">24.6.2.3. Calltips</a></li>
+<li><a class="reference internal" href="#python-shell-window">24.6.2.4. Python Shell window</a></li>
+<li><a class="reference internal" href="#text-colors">24.6.2.5. Text colors</a></li>
+</ul>
+</li>
+<li><a class="reference internal" href="#startup-and-code-execution">24.6.3. Startup and code execution</a><ul>
+<li><a class="reference internal" href="#command-line-usage">24.6.3.1. Command line usage</a></li>
+<li><a class="reference internal" href="#idle-console-differences">24.6.3.2. IDLE-console differences</a></li>
+<li><a class="reference internal" href="#running-without-a-subprocess">24.6.3.3. Running without a subprocess</a></li>
+</ul>
+</li>
+<li><a class="reference internal" href="#help-and-preferences">24.6.4. Help and preferences</a><ul>
+<li><a class="reference internal" href="#additional-help-sources">24.6.4.1. Additional help sources</a></li>
+<li><a class="reference internal" href="#setting-preferences">24.6.4.2. Setting preferences</a></li>
+<li><a class="reference internal" href="#extensions">24.6.4.3. Extensions</a></li>
+</ul>
+</li>
+</ul>
+</li>
+</ul>
+
+  <h4>Previous topic</h4>
+  <p class="topless"><a href="turtle.html"
+                        title="previous chapter">24.5. <tt class="docutils literal"><span class="pre">turtle</span></tt> &#8212; Turtle graphics for Tk</a></p>
+  <h4>Next topic</h4>
+  <p class="topless"><a href="othergui.html"
+                        title="next chapter">24.7. Other Graphical User Interface Packages</a></p>
+<h3>This Page</h3>
+<ul class="this-page-menu">
+  <li><a href="../bugs.html">Report a Bug</a></li>
+  <li><a href="../_sources/library/idle.txt"
+         rel="nofollow">Show Source</a></li>
+</ul>
+
+<div id="searchbox" style="display: none">
+  <h3>Quick search</h3>
+    <form class="search" action="../search.html" method="get">
+      <input type="text" name="q" />
+      <input type="submit" value="Go" />
+      <input type="hidden" name="check_keywords" value="yes" />
+      <input type="hidden" name="area" value="default" />
+    </form>
+    <p class="searchtip" style="font-size: 90%">
+    Enter search terms or a module, class or function name.
+    </p>
+</div>
+<script type="text/javascript">$('#searchbox').show(0);</script>
+        </div>
+      </div>
+      <div class="clearer"></div>
+    </div>
+    <div class="related">
+      <h3>Navigation</h3>
+      <ul>
+        <li class="right" style="margin-right: 10px">
+          <a href="../genindex.html" title="General Index"
+             >index</a></li>
+        <li class="right" >
+          <a href="../py-modindex.html" title="Python Module Index"
+             >modules</a> |</li>
+        <li class="right" >
+          <a href="othergui.html" title="24.7. Other Graphical User Interface Packages"
+             >next</a> |</li>
+        <li class="right" >
+          <a href="turtle.html" title="24.5. turtle — Turtle graphics for Tk"
+             >previous</a> |</li>
+        <li><img src="../_static/py.png" alt=""
+                 style="vertical-align: middle; margin-top: -1px"/></li>
+        <li><a href="https://www.python.org/">Python</a> &raquo;</li>
+        <li>
+          <a href="../index.html">Python 2.7.10 documentation</a> &raquo;
+        </li>
+
+          <li><a href="index.html" >The Python Standard Library</a> &raquo;</li>
+          <li><a href="tk.html" >24. Graphical User Interfaces with Tk</a> &raquo;</li>
+      </ul>
+    </div>
+    <div class="footer">
+    &copy; <a href="../copyright.html">Copyright</a> 1990-2015, Python Software Foundation.
+    <br />
+    The Python Software Foundation is a non-profit corporation.
+    <a href="https://www.python.org/psf/donations/">Please donate.</a>
+    <br />
+    Last updated on Oct 13, 2015.
+    <a href="../bugs.html">Found a bug</a>?
+    <br />
+    Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.2.3.
+    </div>
+
+  </body>
+</html>
diff --git a/lib/python2.7/idlelib/help.py b/lib/python2.7/idlelib/help.py
new file mode 100644
index 0000000..b0e9dc5
--- /dev/null
+++ b/lib/python2.7/idlelib/help.py
@@ -0,0 +1,252 @@
+""" help.py: Implement the Idle help menu.
+Contents are subject to revision at any time, without notice.
+
+
+Help => About IDLE: display About Idle dialog
+
+<to be moved here from aboutDialog.py>
+
+
+Help => IDLE Help: Display help.html with proper formatting.
+Doc/library/idle.rst (Sphinx)=> Doc/build/html/library/idle.html
+(help.copy_strip)=> Lib/idlelib/help.html
+
+HelpParser - Parse help.html and render to tk Text.
+
+HelpText - Display formatted help.html.
+
+HelpFrame - Contain text, scrollbar, and table-of-contents.
+(This will be needed for display in a future tabbed window.)
+
+HelpWindow - Display HelpFrame in a standalone window.
+
+copy_strip - Copy idle.html to help.html, rstripping each line.
+
+show_idlehelp - Create HelpWindow.  Called in EditorWindow.help_dialog.
+"""
+from HTMLParser import HTMLParser
+from os.path import abspath, dirname, isdir, isfile, join
+from Tkinter import Tk, Toplevel, Frame, Text, Scrollbar, Menu, Menubutton
+import tkFont as tkfont
+from idlelib.configHandler import idleConf
+
+use_ttk = False # until available to import
+if use_ttk:
+    from tkinter.ttk import Menubutton
+
+## About IDLE ##
+
+
+## IDLE Help ##
+
+class HelpParser(HTMLParser):
+    """Render help.html into a text widget.
+
+    The overridden handle_xyz methods handle a subset of html tags.
+    The supplied text should have the needed tag configurations.
+    The behavior for unsupported tags, such as table, is undefined.
+    """
+    def __init__(self, text):
+        HTMLParser.__init__(self)
+        self.text = text         # text widget we're rendering into
+        self.tags = ''           # current block level text tags to apply
+        self.chartags = ''       # current character level text tags
+        self.show = False        # used so we exclude page navigation
+        self.hdrlink = False     # used so we don't show header links
+        self.level = 0           # indentation level
+        self.pre = False         # displaying preformatted text
+        self.hprefix = ''        # prefix such as '25.5' to strip from headings
+        self.nested_dl = False   # if we're in a nested <dl>
+        self.simplelist = False  # simple list (no double spacing)
+        self.toc = []            # pair headers with text indexes for toc
+        self.header = ''         # text within header tags for toc
+
+    def indent(self, amt=1):
+        self.level += amt
+        self.tags = '' if self.level == 0 else 'l'+str(self.level)
+
+    def handle_starttag(self, tag, attrs):
+        "Handle starttags in help.html."
+        class_ = ''
+        for a, v in attrs:
+            if a == 'class':
+                class_ = v
+        s = ''
+        if tag == 'div' and class_ == 'section':
+            self.show = True    # start of main content
+        elif tag == 'div' and class_ == 'sphinxsidebar':
+            self.show = False   # end of main content
+        elif tag == 'p' and class_ != 'first':
+            s = '\n\n'
+        elif tag == 'span' and class_ == 'pre':
+            self.chartags = 'pre'
+        elif tag == 'span' and class_ == 'versionmodified':
+            self.chartags = 'em'
+        elif tag == 'em':
+            self.chartags = 'em'
+        elif tag in ['ul', 'ol']:
+            if class_.find('simple') != -1:
+                s = '\n'
+                self.simplelist = True
+            else:
+                self.simplelist = False
+            self.indent()
+        elif tag == 'dl':
+            if self.level > 0:
+                self.nested_dl = True
+        elif tag == 'li':
+            s = '\n* ' if self.simplelist else '\n\n* '
+        elif tag == 'dt':
+            s = '\n\n' if not self.nested_dl else '\n'  # avoid extra line
+            self.nested_dl = False
+        elif tag == 'dd':
+            self.indent()
+            s = '\n'
+        elif tag == 'pre':
+            self.pre = True
+            if self.show:
+                self.text.insert('end', '\n\n')
+            self.tags = 'preblock'
+        elif tag == 'a' and class_ == 'headerlink':
+            self.hdrlink = True
+        elif tag == 'h1':
+            self.tags = tag
+        elif tag in ['h2', 'h3']:
+            if self.show:
+                self.header = ''
+                self.text.insert('end', '\n\n')
+            self.tags = tag
+        if self.show:
+            self.text.insert('end', s, (self.tags, self.chartags))
+
+    def handle_endtag(self, tag):
+        "Handle endtags in help.html."
+        if tag in ['h1', 'h2', 'h3']:
+            self.indent(0)  # clear tag, reset indent
+            if self.show:
+                self.toc.append((self.header, self.text.index('insert')))
+        elif tag in ['span', 'em']:
+            self.chartags = ''
+        elif tag == 'a':
+            self.hdrlink = False
+        elif tag == 'pre':
+            self.pre = False
+            self.tags = ''
+        elif tag in ['ul', 'dd', 'ol']:
+            self.indent(amt=-1)
+
+    def handle_data(self, data):
+        "Handle date segments in help.html."
+        if self.show and not self.hdrlink:
+            d = data if self.pre else data.replace('\n', ' ')
+            if self.tags == 'h1':
+                self.hprefix = d[0:d.index(' ')]
+            if self.tags in ['h1', 'h2', 'h3'] and self.hprefix != '':
+                if d[0:len(self.hprefix)] == self.hprefix:
+                    d = d[len(self.hprefix):].strip()
+                self.header += d
+            self.text.insert('end', d, (self.tags, self.chartags))
+
+    def handle_charref(self, name):
+        self.text.insert('end', unichr(int(name)))
+
+
+class HelpText(Text):
+    "Display help.html."
+    def __init__(self, parent, filename):
+        "Configure tags and feed file to parser."
+        uwide = idleConf.GetOption('main', 'EditorWindow', 'width', type='int')
+        uhigh = idleConf.GetOption('main', 'EditorWindow', 'height', type='int')
+        uhigh = 3 * uhigh // 4  # lines average 4/3 of editor line height
+        Text.__init__(self, parent, wrap='word', highlightthickness=0,
+                      padx=5, borderwidth=0, width=uwide, height=uhigh)
+
+        normalfont = self.findfont(['TkDefaultFont', 'arial', 'helvetica'])
+        fixedfont = self.findfont(['TkFixedFont', 'monaco', 'courier'])
+        self['font'] = (normalfont, 12)
+        self.tag_configure('em', font=(normalfont, 12, 'italic'))
+        self.tag_configure('h1', font=(normalfont, 20, 'bold'))
+        self.tag_configure('h2', font=(normalfont, 18, 'bold'))
+        self.tag_configure('h3', font=(normalfont, 15, 'bold'))
+        self.tag_configure('pre', font=(fixedfont, 12), background='#f6f6ff')
+        self.tag_configure('preblock', font=(fixedfont, 10), lmargin1=25,
+                borderwidth=1, relief='solid', background='#eeffcc')
+        self.tag_configure('l1', lmargin1=25, lmargin2=25)
+        self.tag_configure('l2', lmargin1=50, lmargin2=50)
+        self.tag_configure('l3', lmargin1=75, lmargin2=75)
+        self.tag_configure('l4', lmargin1=100, lmargin2=100)
+
+        self.parser = HelpParser(self)
+        with open(filename) as f:
+            contents = f.read().decode(encoding='utf-8')
+        self.parser.feed(contents)
+        self['state'] = 'disabled'
+
+    def findfont(self, names):
+        "Return name of first font family derived from names."
+        for name in names:
+            if name.lower() in (x.lower() for x in tkfont.names(root=self)):
+                font = tkfont.Font(name=name, exists=True, root=self)
+                return font.actual()['family']
+            elif name.lower() in (x.lower()
+                                  for x in tkfont.families(root=self)):
+                return name
+
+
+class HelpFrame(Frame):
+    "Display html text, scrollbar, and toc."
+    def __init__(self, parent, filename):
+        Frame.__init__(self, parent)
+        text = HelpText(self, filename)
+        self['background'] = text['background']
+        scroll = Scrollbar(self, command=text.yview)
+        text['yscrollcommand'] = scroll.set
+        self.rowconfigure(0, weight=1)
+        self.columnconfigure(1, weight=1)  # text
+        self.toc_menu(text).grid(column=0, row=0, sticky='nw')
+        text.grid(column=1, row=0, sticky='nsew')
+        scroll.grid(column=2, row=0, sticky='ns')
+
+    def toc_menu(self, text):
+        "Create table of contents as drop-down menu."
+        toc = Menubutton(self, text='TOC')
+        drop = Menu(toc, tearoff=False)
+        for lbl, dex in text.parser.toc:
+            drop.add_command(label=lbl, command=lambda dex=dex:text.yview(dex))
+        toc['menu'] = drop
+        return toc
+
+
+class HelpWindow(Toplevel):
+    "Display frame with rendered html."
+    def __init__(self, parent, filename, title):
+        Toplevel.__init__(self, parent)
+        self.wm_title(title)
+        self.protocol("WM_DELETE_WINDOW", self.destroy)
+        HelpFrame(self, filename).grid(column=0, row=0, sticky='nsew')
+        self.grid_columnconfigure(0, weight=1)
+        self.grid_rowconfigure(0, weight=1)
+
+
+def copy_strip():
+    "Copy idle.html to idlelib/help.html, stripping trailing whitespace."
+    src = join(abspath(dirname(dirname(dirname(__file__)))),
+               'Doc', 'build', 'html', 'library', 'idle.html')
+    dst = join(abspath(dirname(__file__)), 'help.html')
+    with open(src, 'r') as inn,\
+         open(dst, 'w') as out:
+        for line in inn:
+            out.write(line.rstrip() + '\n')
+    print('idle.html copied to help.html')
+
+def show_idlehelp(parent):
+    "Create HelpWindow; called from Idle Help event handler."
+    filename = join(abspath(dirname(__file__)), 'help.html')
+    if not isfile(filename):
+        # try copy_strip, present message
+        return
+    HelpWindow(parent, filename, 'IDLE Help')
+
+if __name__ == '__main__':
+    from idlelib.idle_test.htest import run
+    run(show_idlehelp)
diff --git a/lib/python2.7/idlelib/help.txt b/lib/python2.7/idlelib/help.txt
index 6b1c002..296c78b 100644
--- a/lib/python2.7/idlelib/help.txt
+++ b/lib/python2.7/idlelib/help.txt
@@ -1,7 +1,8 @@
-[See the end of this file for ** TIPS ** on using IDLE !!]
+This file, idlelib/help.txt, is out-of-date and no longer used by Idle.
+It is deprecated and will be removed in the future, possibly in 3.6.
+----------------------------------------------------------------------
 
-Click on the dotted line at the top of a menu to "tear it off": a
-separate window containing the menu is created.
+[See the end of this file for ** TIPS ** on using IDLE !!]
 
 File Menu:
 
@@ -90,10 +91,9 @@
 	Configure IDLE -- Open a configuration dialog.  Fonts, indentation,
                           keybindings, and color themes may be altered.
                           Startup Preferences may be set, and Additional Help
-                          Sources can be specified.
-			  
-			  On OS X this menu is not present, use
-			  menu 'IDLE -> Preferences...' instead.
+                          Sources can be specified.  On OS X, open the
+                          configuration dialog by selecting Preferences
+                          in the application menu.
 	---
 	Code Context --	  Open a pane at the top of the edit window which
 			  shows the block context of the section of code
diff --git a/lib/python2.7/idlelib/idle.pyw b/lib/python2.7/idlelib/idle.pyw
index 537dd5a..9ce4c9f 100644
--- a/lib/python2.7/idlelib/idle.pyw
+++ b/lib/python2.7/idlelib/idle.pyw
@@ -2,20 +2,16 @@
     import idlelib.PyShell
 except ImportError:
     # IDLE is not installed, but maybe PyShell is on sys.path:
-    try:
-        import PyShell
-    except ImportError:
-        raise
-    else:
-        import os
-        idledir = os.path.dirname(os.path.abspath(PyShell.__file__))
-        if idledir != os.getcwd():
-            # We're not in the IDLE directory, help the subprocess find run.py
-            pypath = os.environ.get('PYTHONPATH', '')
-            if pypath:
-                os.environ['PYTHONPATH'] = pypath + ':' + idledir
-            else:
-                os.environ['PYTHONPATH'] = idledir
-        PyShell.main()
+    import PyShell
+    import os
+    idledir = os.path.dirname(os.path.abspath(PyShell.__file__))
+    if idledir != os.getcwd():
+        # We're not in the IDLE directory, help the subprocess find run.py
+        pypath = os.environ.get('PYTHONPATH', '')
+        if pypath:
+            os.environ['PYTHONPATH'] = pypath + ':' + idledir
+        else:
+            os.environ['PYTHONPATH'] = idledir
+    PyShell.main()
 else:
     idlelib.PyShell.main()
diff --git a/lib/python2.7/idlelib/idle_test/README.txt b/lib/python2.7/idlelib/idle_test/README.txt
index f6b6a21..2339926 100644
--- a/lib/python2.7/idlelib/idle_test/README.txt
+++ b/lib/python2.7/idlelib/idle_test/README.txt
@@ -1,14 +1,24 @@
 README FOR IDLE TESTS IN IDLELIB.IDLE_TEST
 
+0. Quick Start
+
+Automated unit tests were added in 2.7 for Python 2.x and 3.3 for Python 3.x.
+To run the tests from a command line:
+
+python -m test.test_idle
+
+Human-mediated tests were added later in 2.7 and in 3.4.
+
+python -m idlelib.idle_test.htest
+
 
 1. Test Files
 
 The idle directory, idlelib, has over 60 xyz.py files. The idle_test
-subdirectory should contain a test_xyy.py for each. (For test modules, make
-'xyz' lower case, and possibly shorten it.) Each file should start with the
-something like the following template, with the blanks after after '.' and 'as',
-and before and after '_' filled in.
----
+subdirectory should contain a test_xyz.py for each, where 'xyz' is lowercased
+even if xyz.py is not. Here is a possible template, with the blanks after
+'.' and 'as', and before and after '_' to be filled in.
+
 import unittest
 from test.support import requires
 import idlelib. as
@@ -18,33 +28,33 @@
     def test_(self):
 
 if __name__ == '__main__':
-    unittest.main(verbosity=2, exit=2)
----
-Idle tests are run with unittest; do not use regrtest's test_main.
+    unittest.main(verbosity=2)
 
-Once test_xyy is written, the following should go at the end of xyy.py,
-with xyz (lowercased) added after 'test_'.
----
+Add the following at the end of xyz.py, with the appropriate name added after
+'test_'. Some files already have something like this for htest.  If so, insert
+the import and unittest.main lines before the htest lines.
+
 if __name__ == "__main__":
     import unittest
     unittest.main('idlelib.idle_test.test_', verbosity=2, exit=False)
----
 
 
-2. Gui Tests
 
-Gui tests need 'requires' from test.support (test.test_support in 2.7). A
-test is a gui test if it creates a Tk root or master object either directly
-or indirectly by instantiating a tkinter or idle class. For the benefit of
-test processes that either have no graphical environment available or are not
-allowed to use it, gui tests must be 'guarded' by "requires('gui')" in a
-setUp function or method. This will typically be setUpClass.
+2. GUI Tests
 
-To avoid interfering with other gui tests, all gui objects must be destroyed
-and deleted by the end of the test.  If a widget, such as a Tk root, is created
-in a setUpX function, destroy it in the corresponding tearDownX.  For module
-and class attributes, also delete the widget.
----
+When run as part of the Python test suite, Idle gui tests need to run
+test.support.requires('gui') (test.test_support in 2.7).  A test is a gui test
+if it creates a Tk root or master object either directly or indirectly by
+instantiating a tkinter or idle class.  For the benefit of test processes that
+either have no graphical environment available or are not allowed to use it, gui
+tests must be 'guarded' by "requires('gui')" in a setUp function or method.
+This will typically be setUpClass.
+
+To avoid interfering with other gui tests, all gui objects must be destroyed and
+deleted by the end of the test.  Widgets, such as a Tk root, created in a setUpX
+function, should be destroyed in the corresponding tearDownX.  Module and class
+widget attributes should also be deleted.
+
     @classmethod
     def setUpClass(cls):
         requires('gui')
@@ -54,49 +64,55 @@
     def tearDownClass(cls):
         cls.root.destroy()
         del cls.root
----
 
-Support.requires('gui') causes the test(s) it guards to be skipped if any of
+
+Requires('gui') causes the test(s) it guards to be skipped if any of
 a few conditions are met:
- - The tests are being run by regrtest.py, and it was started without
-   enabling the "gui" resource with the "-u" command line option.
+    
+ - The tests are being run by regrtest.py, and it was started without enabling
+   the "gui" resource with the "-u" command line option.
+   
  - The tests are being run on Windows by a service that is not allowed to
    interact with the graphical environment.
+   
  - The tests are being run on Mac OSX in a process that cannot make a window
    manager connection.
+   
  - tkinter.Tk cannot be successfully instantiated for some reason.
+ 
  - test.support.use_resources has been set by something other than
    regrtest.py and does not contain "gui".
-
-Since non-gui tests always run, but gui tests only sometimes, tests of non-gui
-operations should best avoid needing a gui. Methods that make incidental use of
-tkinter (tk) variables and messageboxes can do this by using the mock classes in
-idle_test/mock_tk.py. There is also a mock text that will handle some uses of the
-tk Text widget.
+   
+Tests of non-gui operations should avoid creating tk widgets. Incidental uses of
+tk variables and messageboxes can be replaced by the mock classes in
+idle_test/mock_tk.py. The mock text handles some uses of the tk Text widget.
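+
+For illustration only (this mirrors the commented-out setUpModule in
+test_searchdialogbase.py and is not required by every test):
+
+from idlelib import SearchEngine as se
+from idlelib.idle_test.mock_tk import Var, Mbox
+
+def setUpModule():
+    # Replace tk objects used to initialize se.SearchEngine.
+    se.BooleanVar = Var
+    se.StringVar = Var
+    se.tkMessageBox = Mbox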
 
 
-3. Running Tests
+3. Running Unit Tests
 
-Assume that xyz.py and test_xyz.py end with the "if __name__" statements given
-above. In Idle, pressing F5 in an editor window with either loaded will run all
-tests in the test_xyz file with the version of Python running Idle.  The test
-report and any tracebacks will appear in the Shell window. The options in these
-"if __name__" statements are appropriate for developers running (as opposed to
-importing) either of the files during development: verbosity=2 lists all test
-methods in the file; exit=False avoids a spurious sys.exit traceback that would
-otherwise occur when running in Idle. The following command lines also run
-all test methods, including gui tests, in test_xyz.py. (The exceptions are that
-idlelib and idlelib.idle start Idle and idlelib.PyShell should (issue 18330).)
+Assume that xyz.py and test_xyz.py both end with a unittest.main() call.
+Running either from an Idle editor runs all tests in the test_xyz file with the
+version of Python running Idle.  Test output appears in the Shell window.  The
+'verbosity=2' option lists all test methods in the file, which is appropriate
+when developing tests. The 'exit=False' option is needed in xyz.py files when an
+htest follows.
 
-python -m idlelib.xyz  # With the capitalization of the xyz module
+The following command lines also run all test methods, including
+gui tests, in test_xyz.py. (Both '-m idlelib' and '-m idlelib.idle' start
+Idle and so cannot run tests.)
+
+python -m idlelib.xyz
 python -m idlelib.idle_test.test_xyz
 
-To run all idle_test/test_*.py tests, either interactively
-('>>>', with unittest imported) or from a command line, use one of the
-following. (Notes: in 2.7, 'test ' (with the space) is 'test.regrtest ';
-where present, -v and -ugui can be omitted.)
+The following runs all idle_test/test_*.py tests interactively.
 
->>> unittest.main('idlelib.idle_test', verbosity=2, exit=False)
+>>> import unittest
+>>> unittest.main('idlelib.idle_test', verbosity=2)
+
+The following commands run all Idle tests at a command line.  Option '-v' is the same as
+'verbosity=2'.  (For 2.7, replace 'test' in the second line with
+'test.regrtest'.)
+
 python -m unittest -v idlelib.idle_test
 python -m test -v -ugui test_idle
 python -m test.test_idle
@@ -113,3 +129,15 @@
 unittest on the command line.
 
 python -m unittest -v idlelib.idle_test.test_xyz.Test_case.test_meth
+
+
+4. Human-mediated Tests
+
+Human-mediated tests are widget tests that cannot be automated but need human
+verification. They are contained in idlelib/idle_test/htest.py, which has
+instructions.  (Some modules need an auxiliary function, identified with # htest
+# on the header line.)  The set is about complete, though some tests need
+improvement. To run all htests, run the htest file from an editor or from the
+command line with:
+
+python -m idlelib.idle_test.htest
diff --git a/lib/python2.7/idlelib/idle_test/htest.py b/lib/python2.7/idlelib/idle_test/htest.py
index 27377ae..f341409 100644
--- a/lib/python2.7/idlelib/idle_test/htest.py
+++ b/lib/python2.7/idlelib/idle_test/htest.py
@@ -93,15 +93,6 @@
            "Double clicking on items prints a traceback for an exception "
            "that is ignored."
     }
-ConfigExtensionsDialog_spec = {
-    'file': 'configDialog',
-    'kwds': {'title': 'Test Extension Configuration',
-             '_htest': True,},
-    'msg': "IDLE extensions dialog.\n"
-           "\n[Ok] to close the dialog.[Apply] to apply the settings and "
-           "and [Cancel] to revert all changes.\nRe-run the test to ensure "
-           "changes made have persisted."
-    }
 
 _color_delegator_spec = {
     'file': 'ColorDelegator',
@@ -121,7 +112,8 @@
            "font face of the text in the area below it.\nIn the "
            "'Highlighting' tab, try different color schemes. Clicking "
            "items in the sample program should update the choices above it."
-           "\nIn the 'Keys' and 'General' tab, test settings of interest."
+           "\nIn the 'Keys', 'General' and 'Extensions' tabs, test settings"
+           "of interest."
            "\n[Ok] to close the dialog.[Apply] to apply the settings and "
            "and [Cancel] to revert all changes.\nRe-run the test to ensure "
            "changes made have persisted."
@@ -194,19 +186,17 @@
            "should open that file \nin a new EditorWindow."
     }
 
-_help_dialog_spec = {
-    'file': 'EditorWindow',
-    'kwds': {},
-    'msg': "If the help text displays, this works.\n"
-           "Text is selectable. Window is scrollable."
-    }
-
 _io_binding_spec = {
     'file': 'IOBinding',
     'kwds': {},
-    'msg': "Test the following bindings\n"
-           "<Control-o> to display open window from file dialog.\n"
-           "<Control-s> to save the file\n"
+    'msg': "Test the following bindings.\n"
+           "<Control-o> to open file from dialog.\n"
+           "Edit the file.\n"
+           "<Control-p> to print the file.\n"
+           "<Control-s> to save the file.\n"
+           "<Alt-s> to save-as another file.\n"
+           "<Control-c> to save-copy-as another file.\n"
+           "Check that changes were saved by opening the file elsewhere."
     }
 
 _multi_call_spec = {
@@ -279,6 +269,13 @@
            "Right clicking an item will display a popup."
     }
 
+show_idlehelp_spec = {
+    'file': 'help',
+    'kwds': {},
+    'msg': "If the help text displays, this works.\n"
+           "Text is selectable. Window is scrollable."
+    }
+
 _stack_viewer_spec = {
     'file': 'StackViewer',
     'kwds': {},
diff --git a/lib/python2.7/idlelib/idle_test/test_autocomplete.py b/lib/python2.7/idlelib/idle_test/test_autocomplete.py
index ee9d0ed..c2a7266 100644
--- a/lib/python2.7/idlelib/idle_test/test_autocomplete.py
+++ b/lib/python2.7/idlelib/idle_test/test_autocomplete.py
@@ -1,6 +1,6 @@
 import unittest
 from test.test_support import requires
-from Tkinter import Tk, Text, TclError
+from Tkinter import Tk, Text
 
 import idlelib.AutoComplete as ac
 import idlelib.AutoCompleteWindow as acw
@@ -95,8 +95,8 @@
         del ev.mc_state
 
         # If autocomplete window is open, complete() method is called
-        testwin = self.autocomplete._make_autocomplete_window()
         self.text.insert('1.0', 're.')
+        # This must call autocomplete._make_autocomplete_window()
         Equal(self.autocomplete.autocomplete_event(ev), 'break')
 
         # If autocomplete window is not active or does not exist,
diff --git a/lib/python2.7/idlelib/idle_test/test_formatparagraph.py b/lib/python2.7/idlelib/idle_test/test_formatparagraph.py
index 07bbf16..9185a96 100644
--- a/lib/python2.7/idlelib/idle_test/test_formatparagraph.py
+++ b/lib/python2.7/idlelib/idle_test/test_formatparagraph.py
@@ -2,7 +2,7 @@
 import unittest
 from idlelib import FormatParagraph as fp
 from idlelib.EditorWindow import EditorWindow
-from Tkinter import Tk, Text, TclError
+from Tkinter import Tk, Text
 from test.test_support import requires
 
 
diff --git a/lib/python2.7/idlelib/idle_test/test_pathbrowser.py b/lib/python2.7/idlelib/idle_test/test_pathbrowser.py
index 7ad7c97..f028414 100644
--- a/lib/python2.7/idlelib/idle_test/test_pathbrowser.py
+++ b/lib/python2.7/idlelib/idle_test/test_pathbrowser.py
@@ -1,5 +1,8 @@
 import unittest
-import idlelib.PathBrowser as PathBrowser
+import os
+import sys
+import idlelib
+from idlelib import PathBrowser
 
 class PathBrowserTest(unittest.TestCase):
 
@@ -7,6 +10,19 @@
         # Issue16226 - make sure that getting a sublist works
         d = PathBrowser.DirBrowserTreeItem('')
         d.GetSubList()
+        self.assertEqual('', d.GetText())
+
+        dir = os.path.split(os.path.abspath(idlelib.__file__))[0]
+        self.assertEqual(d.ispackagedir(dir), True)
+        self.assertEqual(d.ispackagedir(dir + '/Icons'), False)
+
+    def test_PathBrowserTreeItem(self):
+        p = PathBrowser.PathBrowserTreeItem()
+        self.assertEqual(p.GetText(), 'sys.path')
+        sub = p.GetSubList()
+        self.assertEqual(len(sub), len(sys.path))
+        # Following fails in 2.7 because old-style class
+        #self.assertEqual(type(sub[0]), PathBrowser.DirBrowserTreeItem)
 
 if __name__ == '__main__':
     unittest.main(verbosity=2, exit=False)
diff --git a/lib/python2.7/idlelib/idle_test/test_searchdialogbase.py b/lib/python2.7/idlelib/idle_test/test_searchdialogbase.py
index c1bee3e..32abfe6 100644
--- a/lib/python2.7/idlelib/idle_test/test_searchdialogbase.py
+++ b/lib/python2.7/idlelib/idle_test/test_searchdialogbase.py
@@ -6,14 +6,13 @@
 '''
 import unittest
 from test.test_support import requires
-from Tkinter import Tk, Toplevel, Frame, Label, BooleanVar, StringVar
+from Tkinter import Tk, Toplevel, Frame ## BooleanVar, StringVar
 from idlelib import SearchEngine as se
 from idlelib import SearchDialogBase as sdb
 from idlelib.idle_test.mock_idle import Func
-from idlelib.idle_test.mock_tk import Var, Mbox
+##from idlelib.idle_test.mock_tk import Var
 
-# The following could help make some tests gui-free.
-# However, they currently make radiobutton tests fail.
+# The ## imports above & following could help make some tests gui-free.
+# However, they currently make radiobutton tests fail.
 ##def setUpModule():
 ##    # Replace tk objects used to initialize se.SearchEngine.
 ##    se.BooleanVar = Var
diff --git a/lib/python2.7/idlelib/idle_test/test_searchengine.py b/lib/python2.7/idlelib/idle_test/test_searchengine.py
index 2525a13..8bf9d47 100644
--- a/lib/python2.7/idlelib/idle_test/test_searchengine.py
+++ b/lib/python2.7/idlelib/idle_test/test_searchengine.py
@@ -7,7 +7,7 @@
 
 import re
 import unittest
-from test.test_support import requires
+#from test.test_support import requires
 from Tkinter import  BooleanVar, StringVar, TclError  # ,Tk, Text
 import tkMessageBox
 from idlelib import SearchEngine as se
diff --git a/lib/python2.7/idlelib/idle_test/test_text.py b/lib/python2.7/idlelib/idle_test/test_text.py
index f0b9b76..50d3fac 100644
--- a/lib/python2.7/idlelib/idle_test/test_text.py
+++ b/lib/python2.7/idlelib/idle_test/test_text.py
@@ -3,7 +3,6 @@
 from test.test_support import requires
 
 from _tkinter import TclError
-import Tkinter as tk
 
 class TextTest(object):
 
diff --git a/lib/python2.7/idlelib/idle_test/test_warning.py b/lib/python2.7/idlelib/idle_test/test_warning.py
index da1d8a1..26710f1 100644
--- a/lib/python2.7/idlelib/idle_test/test_warning.py
+++ b/lib/python2.7/idlelib/idle_test/test_warning.py
@@ -68,6 +68,15 @@
                     'Test', UserWarning, 'test_warning.py', 99, f, 'Line of code')
             self.assertEqual(shellmsg.splitlines(), f.getvalue().splitlines())
 
+class ImportWarnTest(unittest.TestCase):
+    def test_idlever(self):
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            import idlelib.idlever
+            self.assertEqual(len(w), 1)
+            self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
+            self.assertIn("version", str(w[-1].message))
+
 
 if __name__ == '__main__':
     unittest.main(verbosity=2, exit=False)
diff --git a/lib/python2.7/idlelib/idlever.py b/lib/python2.7/idlelib/idlever.py
index 563d933..3e9f69a 100644
--- a/lib/python2.7/idlelib/idlever.py
+++ b/lib/python2.7/idlelib/idlever.py
@@ -1,4 +1,12 @@
-"""Unused by Idle: there is no separate Idle version anymore.
-Kept only for possible existing extension use."""
+"""
+The separate Idle version was eliminated years ago;
+idlelib.idlever is no longer used by Idle
+and will be removed in 3.6 or later.  Use
+    from sys import version
+    IDLE_VERSION = version[:version.index(' ')]
+"""
+# Kept for now only for possible existing extension use
+import warnings as w
+w.warn(__doc__, DeprecationWarning, stacklevel=2)
 from sys import version
 IDLE_VERSION = version[:version.index(' ')]
diff --git a/lib/python2.7/idlelib/macosxSupport.py b/lib/python2.7/idlelib/macosxSupport.py
index 10a06bb..041d700 100644
--- a/lib/python2.7/idlelib/macosxSupport.py
+++ b/lib/python2.7/idlelib/macosxSupport.py
@@ -125,11 +125,9 @@
     #
     # Due to a (mis-)feature of TkAqua the user will also see an empty Help
     # menu.
-    from Tkinter import Menu, Text, Text
-    from idlelib.EditorWindow import prepstr, get_accelerator
+    from Tkinter import Menu
     from idlelib import Bindings
     from idlelib import WindowList
-    from idlelib.MultiCall import MultiCallCreator
 
     closeItem = Bindings.menudefs[0][1][-2]
 
@@ -149,7 +147,7 @@
     root.configure(menu=menubar)
     menudict = {}
 
-    menudict['windows'] = menu = Menu(menubar, name='windows')
+    menudict['windows'] = menu = Menu(menubar, name='windows', tearoff=0)
     menubar.add_cascade(label='Window', menu=menu, underline=0)
 
     def postwindowsmenu(menu=menu):
@@ -163,18 +161,23 @@
     WindowList.register_callback(postwindowsmenu)
 
     def about_dialog(event=None):
+        "Handle Help 'About IDLE' event."
+        # Synchronize with EditorWindow.EditorWindow.about_dialog.
         from idlelib import aboutDialog
         aboutDialog.AboutDialog(root, 'About IDLE')
 
     def config_dialog(event=None):
+        "Handle Options 'Configure IDLE' event."
+        # Synchronize with EditorWindow.EditorWindow.config_dialog.
         from idlelib import configDialog
         root.instance_dict = flist.inversedict
         configDialog.ConfigDialog(root, 'Settings')
 
     def help_dialog(event=None):
-        from idlelib import textView
-        fn = path.join(path.abspath(path.dirname(__file__)), 'help.txt')
-        textView.view_file(root, 'Help', fn)
+        "Handle Help 'IDLE Help' event."
+        # Synchronize with EditorWindow.EditorWindow.help_dialog.
+        from idlelib import help
+        help.show_idlehelp(root)
 
     root.bind('<<about-idle>>', about_dialog)
     root.bind('<<open-config-dialog>>', config_dialog)
@@ -189,7 +192,8 @@
 
     if isCarbonTk():
         # for Carbon AquaTk, replace the default Tk apple menu
-        menudict['application'] = menu = Menu(menubar, name='apple')
+        menudict['application'] = menu = Menu(menubar, name='apple',
+                                              tearoff=0)
         menubar.add_cascade(label='IDLE', menu=menu)
         Bindings.menudefs.insert(0,
             ('application', [
diff --git a/lib/python2.7/idlelib/rpc.py b/lib/python2.7/idlelib/rpc.py
index 8f611a3..5c4aabd 100644
--- a/lib/python2.7/idlelib/rpc.py
+++ b/lib/python2.7/idlelib/rpc.py
@@ -332,10 +332,7 @@
                 n = self.sock.send(s[:BUFSIZE])
             except (AttributeError, TypeError):
                 raise IOError, "socket no longer exists"
-            except socket.error:
-                raise
-            else:
-                s = s[n:]
+            s = s[n:]
 
     buffer = ""
     bufneed = 4
diff --git a/lib/python2.7/idlelib/run.py b/lib/python2.7/idlelib/run.py
index 604c5cd..466c61e 100644
--- a/lib/python2.7/idlelib/run.py
+++ b/lib/python2.7/idlelib/run.py
@@ -1,5 +1,4 @@
 import sys
-import io
 import linecache
 import time
 import socket
@@ -165,7 +164,7 @@
         tkMessageBox.showerror("IDLE Subprocess Error", msg, parent=root)
     else:
         tkMessageBox.showerror("IDLE Subprocess Error",
-                               "Socket Error: %s" % err.args[1])
+                               "Socket Error: %s" % err.args[1], parent=root)
     root.destroy()
 
 def print_exception():
@@ -211,6 +210,8 @@
         fn, ln, nm, line = tb[i]
         if nm == '?':
             nm = "-toplevel-"
+        if fn.startswith("<pyshell#") and IOBinding.encoding != 'utf-8':
+            ln -= 1  # correction for coding cookie
         if not line and fn.startswith("<pyshell#"):
             line = rpchandler.remotecall('linecache', 'getline',
                                               (fn, ln), {})
diff --git a/lib/python2.7/idlelib/textView.py b/lib/python2.7/idlelib/textView.py
index eb60274..8687d40 100644
--- a/lib/python2.7/idlelib/textView.py
+++ b/lib/python2.7/idlelib/textView.py
@@ -21,7 +21,7 @@
         Toplevel.__init__(self, parent)
         self.configure(borderwidth=5)
         # place dialog below parent if running htest
-        self.geometry("=%dx%d+%d+%d" % (625, 500,
+        self.geometry("=%dx%d+%d+%d" % (750, 500,
                            parent.winfo_rootx() + 10,
                            parent.winfo_rooty() + (10 if not _htest else 100)))
         #elguavas - config placeholders til config stuff completed
diff --git a/lib/python2.7/inspect.py b/lib/python2.7/inspect.py
index 9336943..b08e145 100644
--- a/lib/python2.7/inspect.py
+++ b/lib/python2.7/inspect.py
@@ -969,8 +969,13 @@
         assign(varkw, named)
     elif named:
         unexpected = next(iter(named))
-        if isinstance(unexpected, unicode):
-            unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
+        try:
+            unicode
+        except NameError:
+            pass
+        else:
+            if isinstance(unexpected, unicode):
+                unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
         raise TypeError("%s() got an unexpected keyword argument '%s'" %
                         (f_name, unexpected))
     unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
diff --git a/lib/python2.7/json/__init__.py b/lib/python2.7/json/__init__.py
index 0be85da..140a3d0 100644
--- a/lib/python2.7/json/__init__.py
+++ b/lib/python2.7/json/__init__.py
@@ -195,10 +195,11 @@
         encoding='utf-8', default=None, sort_keys=False, **kw):
     """Serialize ``obj`` to a JSON formatted ``str``.
 
-    If ``skipkeys`` is false then ``dict`` keys that are not basic types
+    If ``skipkeys`` is true then ``dict`` keys that are not basic types
     (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
     will be skipped instead of raising a ``TypeError``.
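+
+    For example (an illustrative sketch; the tuple key is silently dropped)::
+
+        >>> json.dumps({(1, 2): 'skipped', 'kept': 1}, skipkeys=True)
+        '{"kept": 1}'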
 
+
     If ``ensure_ascii`` is false, all non-ASCII characters are not escaped, and
     the return value may be a ``unicode`` instance. See ``dump`` for details.
 
diff --git a/lib/python2.7/json/decoder.py b/lib/python2.7/json/decoder.py
index 1b43238..5141f87 100644
--- a/lib/python2.7/json/decoder.py
+++ b/lib/python2.7/json/decoder.py
@@ -15,10 +15,8 @@
 FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
 
 def _floatconstants():
-    _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
-    if sys.byteorder != 'big':
-        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
-    nan, inf = struct.unpack('dd', _BYTES)
+    nan, = struct.unpack('>d', b'\x7f\xf8\x00\x00\x00\x00\x00\x00')
+    inf, = struct.unpack('>d', b'\x7f\xf0\x00\x00\x00\x00\x00\x00')
     return nan, inf, -inf
 
 NaN, PosInf, NegInf = _floatconstants()
diff --git a/lib/python2.7/lib-dynload/Python-2.7.10-py2.7.egg-info b/lib/python2.7/lib-dynload/Python-2.7.11_-py2.7.egg-info
similarity index 98%
rename from lib/python2.7/lib-dynload/Python-2.7.10-py2.7.egg-info
rename to lib/python2.7/lib-dynload/Python-2.7.11_-py2.7.egg-info
index 90f3f8a..4cff0a8 100644
--- a/lib/python2.7/lib-dynload/Python-2.7.10-py2.7.egg-info
+++ b/lib/python2.7/lib-dynload/Python-2.7.11_-py2.7.egg-info
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: Python
-Version: 2.7.10
+Version: 2.7.11+
 Summary: A high-level object-oriented programming language
 Home-page: http://www.python.org/2.7
 Author: Guido van Rossum and the Python community
diff --git a/lib/python2.7/lib-dynload/_bisect.so b/lib/python2.7/lib-dynload/_bisect.so
index aedb40d..90ed935 100755
--- a/lib/python2.7/lib-dynload/_bisect.so
+++ b/lib/python2.7/lib-dynload/_bisect.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_codecs_cn.so b/lib/python2.7/lib-dynload/_codecs_cn.so
index 599306f..777a800 100755
--- a/lib/python2.7/lib-dynload/_codecs_cn.so
+++ b/lib/python2.7/lib-dynload/_codecs_cn.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_codecs_hk.so b/lib/python2.7/lib-dynload/_codecs_hk.so
index 8b9654c..93f86bf 100755
--- a/lib/python2.7/lib-dynload/_codecs_hk.so
+++ b/lib/python2.7/lib-dynload/_codecs_hk.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_codecs_iso2022.so b/lib/python2.7/lib-dynload/_codecs_iso2022.so
index 42c24b9..d828f8d 100755
--- a/lib/python2.7/lib-dynload/_codecs_iso2022.so
+++ b/lib/python2.7/lib-dynload/_codecs_iso2022.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_codecs_jp.so b/lib/python2.7/lib-dynload/_codecs_jp.so
index d87d7bf..e5a6f40 100755
--- a/lib/python2.7/lib-dynload/_codecs_jp.so
+++ b/lib/python2.7/lib-dynload/_codecs_jp.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_codecs_kr.so b/lib/python2.7/lib-dynload/_codecs_kr.so
index f1730d2..42f6355 100755
--- a/lib/python2.7/lib-dynload/_codecs_kr.so
+++ b/lib/python2.7/lib-dynload/_codecs_kr.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_codecs_tw.so b/lib/python2.7/lib-dynload/_codecs_tw.so
index 5ae6eec..eb28cfb 100755
--- a/lib/python2.7/lib-dynload/_codecs_tw.so
+++ b/lib/python2.7/lib-dynload/_codecs_tw.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_collections.so b/lib/python2.7/lib-dynload/_collections.so
index 02f2084..fe8de98 100755
--- a/lib/python2.7/lib-dynload/_collections.so
+++ b/lib/python2.7/lib-dynload/_collections.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_csv.so b/lib/python2.7/lib-dynload/_csv.so
index 3640c89..747e212 100755
--- a/lib/python2.7/lib-dynload/_csv.so
+++ b/lib/python2.7/lib-dynload/_csv.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_ctypes.so b/lib/python2.7/lib-dynload/_ctypes.so
index 3323ec3..13a582f 100755
--- a/lib/python2.7/lib-dynload/_ctypes.so
+++ b/lib/python2.7/lib-dynload/_ctypes.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_ctypes_test.so b/lib/python2.7/lib-dynload/_ctypes_test.so
index fad0d19..f621f71 100755
--- a/lib/python2.7/lib-dynload/_ctypes_test.so
+++ b/lib/python2.7/lib-dynload/_ctypes_test.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_curses.so b/lib/python2.7/lib-dynload/_curses.so
deleted file mode 100755
index 4467141..0000000
--- a/lib/python2.7/lib-dynload/_curses.so
+++ /dev/null
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_curses_panel.so b/lib/python2.7/lib-dynload/_curses_panel.so
deleted file mode 100755
index 8c56626..0000000
--- a/lib/python2.7/lib-dynload/_curses_panel.so
+++ /dev/null
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_elementtree.so b/lib/python2.7/lib-dynload/_elementtree.so
index bd3f536..2a2cf22 100755
--- a/lib/python2.7/lib-dynload/_elementtree.so
+++ b/lib/python2.7/lib-dynload/_elementtree.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_functools.so b/lib/python2.7/lib-dynload/_functools.so
index 132b8b5..3a9d322 100755
--- a/lib/python2.7/lib-dynload/_functools.so
+++ b/lib/python2.7/lib-dynload/_functools.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_hashlib.so b/lib/python2.7/lib-dynload/_hashlib.so
deleted file mode 100755
index f752342..0000000
--- a/lib/python2.7/lib-dynload/_hashlib.so
+++ /dev/null
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_heapq.so b/lib/python2.7/lib-dynload/_heapq.so
index 31e3f0a..e44f1be 100755
--- a/lib/python2.7/lib-dynload/_heapq.so
+++ b/lib/python2.7/lib-dynload/_heapq.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_hotshot.so b/lib/python2.7/lib-dynload/_hotshot.so
index 3f9fddc..d732327 100755
--- a/lib/python2.7/lib-dynload/_hotshot.so
+++ b/lib/python2.7/lib-dynload/_hotshot.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_io.so b/lib/python2.7/lib-dynload/_io.so
index c06dc6c..0561ae6 100755
--- a/lib/python2.7/lib-dynload/_io.so
+++ b/lib/python2.7/lib-dynload/_io.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_json.so b/lib/python2.7/lib-dynload/_json.so
index fb181d7..72b1acd 100755
--- a/lib/python2.7/lib-dynload/_json.so
+++ b/lib/python2.7/lib-dynload/_json.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_locale.so b/lib/python2.7/lib-dynload/_locale.so
index f3db56c..1d58d0a 100755
--- a/lib/python2.7/lib-dynload/_locale.so
+++ b/lib/python2.7/lib-dynload/_locale.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_lsprof.so b/lib/python2.7/lib-dynload/_lsprof.so
index 2b1c57f..60949ee 100755
--- a/lib/python2.7/lib-dynload/_lsprof.so
+++ b/lib/python2.7/lib-dynload/_lsprof.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_md5.so b/lib/python2.7/lib-dynload/_md5.so
new file mode 100755
index 0000000..d425dab
--- /dev/null
+++ b/lib/python2.7/lib-dynload/_md5.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_multibytecodec.so b/lib/python2.7/lib-dynload/_multibytecodec.so
index f6ea1a9..625ac96 100755
--- a/lib/python2.7/lib-dynload/_multibytecodec.so
+++ b/lib/python2.7/lib-dynload/_multibytecodec.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_multiprocessing.so b/lib/python2.7/lib-dynload/_multiprocessing.so
index f79b535..359dae3 100755
--- a/lib/python2.7/lib-dynload/_multiprocessing.so
+++ b/lib/python2.7/lib-dynload/_multiprocessing.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_random.so b/lib/python2.7/lib-dynload/_random.so
index 16bb6ab..4f2e884 100755
--- a/lib/python2.7/lib-dynload/_random.so
+++ b/lib/python2.7/lib-dynload/_random.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_sha.so b/lib/python2.7/lib-dynload/_sha.so
new file mode 100755
index 0000000..ce0c809
--- /dev/null
+++ b/lib/python2.7/lib-dynload/_sha.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_socket.so b/lib/python2.7/lib-dynload/_socket.so
index 8bba56b..6b38e29 100755
--- a/lib/python2.7/lib-dynload/_socket.so
+++ b/lib/python2.7/lib-dynload/_socket.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_ssl.so b/lib/python2.7/lib-dynload/_ssl.so
deleted file mode 100755
index 0468c9e..0000000
--- a/lib/python2.7/lib-dynload/_ssl.so
+++ /dev/null
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_struct.so b/lib/python2.7/lib-dynload/_struct.so
index 177c9b4..08b3616 100755
--- a/lib/python2.7/lib-dynload/_struct.so
+++ b/lib/python2.7/lib-dynload/_struct.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/_testcapi.so b/lib/python2.7/lib-dynload/_testcapi.so
index f43b362..4effc63 100755
--- a/lib/python2.7/lib-dynload/_testcapi.so
+++ b/lib/python2.7/lib-dynload/_testcapi.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/array.so b/lib/python2.7/lib-dynload/array.so
index 8498c9a..679c869 100755
--- a/lib/python2.7/lib-dynload/array.so
+++ b/lib/python2.7/lib-dynload/array.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/audioop.so b/lib/python2.7/lib-dynload/audioop.so
index 21b7695..f190c5e 100755
--- a/lib/python2.7/lib-dynload/audioop.so
+++ b/lib/python2.7/lib-dynload/audioop.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/binascii.so b/lib/python2.7/lib-dynload/binascii.so
index ef0495e..a56003e 100755
--- a/lib/python2.7/lib-dynload/binascii.so
+++ b/lib/python2.7/lib-dynload/binascii.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/cPickle.so b/lib/python2.7/lib-dynload/cPickle.so
index c36a5a0..1368d13 100755
--- a/lib/python2.7/lib-dynload/cPickle.so
+++ b/lib/python2.7/lib-dynload/cPickle.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/cStringIO.so b/lib/python2.7/lib-dynload/cStringIO.so
index 6432175..5290ccc 100755
--- a/lib/python2.7/lib-dynload/cStringIO.so
+++ b/lib/python2.7/lib-dynload/cStringIO.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/cmath.so b/lib/python2.7/lib-dynload/cmath.so
index 1d7a380..18a9137 100755
--- a/lib/python2.7/lib-dynload/cmath.so
+++ b/lib/python2.7/lib-dynload/cmath.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/crypt.so b/lib/python2.7/lib-dynload/crypt.so
deleted file mode 100755
index 29e809d..0000000
--- a/lib/python2.7/lib-dynload/crypt.so
+++ /dev/null
Binary files differ
diff --git a/lib/python2.7/lib-dynload/crypt_failed.so b/lib/python2.7/lib-dynload/crypt_failed.so
new file mode 100755
index 0000000..51958be
--- /dev/null
+++ b/lib/python2.7/lib-dynload/crypt_failed.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/datetime.so b/lib/python2.7/lib-dynload/datetime.so
index d0c87a9..0017f4c 100755
--- a/lib/python2.7/lib-dynload/datetime.so
+++ b/lib/python2.7/lib-dynload/datetime.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/fcntl.so b/lib/python2.7/lib-dynload/fcntl.so
index 6fe1c0e..7322415 100755
--- a/lib/python2.7/lib-dynload/fcntl.so
+++ b/lib/python2.7/lib-dynload/fcntl.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/future_builtins.so b/lib/python2.7/lib-dynload/future_builtins.so
index 6d2806d..8bc37d4 100755
--- a/lib/python2.7/lib-dynload/future_builtins.so
+++ b/lib/python2.7/lib-dynload/future_builtins.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/grp.so b/lib/python2.7/lib-dynload/grp.so
index ae174b2..86b3bb7 100755
--- a/lib/python2.7/lib-dynload/grp.so
+++ b/lib/python2.7/lib-dynload/grp.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/itertools.so b/lib/python2.7/lib-dynload/itertools.so
index 4cddbd0..aca8629 100755
--- a/lib/python2.7/lib-dynload/itertools.so
+++ b/lib/python2.7/lib-dynload/itertools.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/linuxaudiodev.so b/lib/python2.7/lib-dynload/linuxaudiodev.so
index 9c12bbf..afc199f 100755
--- a/lib/python2.7/lib-dynload/linuxaudiodev.so
+++ b/lib/python2.7/lib-dynload/linuxaudiodev.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/math.so b/lib/python2.7/lib-dynload/math.so
index 74ff5f0..eee0750 100755
--- a/lib/python2.7/lib-dynload/math.so
+++ b/lib/python2.7/lib-dynload/math.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/mmap.so b/lib/python2.7/lib-dynload/mmap.so
index 1bf74f2..cfb9f1b 100755
--- a/lib/python2.7/lib-dynload/mmap.so
+++ b/lib/python2.7/lib-dynload/mmap.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/nis.so b/lib/python2.7/lib-dynload/nis.so
deleted file mode 100755
index 1866a0b..0000000
--- a/lib/python2.7/lib-dynload/nis.so
+++ /dev/null
Binary files differ
diff --git a/lib/python2.7/lib-dynload/nis_failed.so b/lib/python2.7/lib-dynload/nis_failed.so
new file mode 100755
index 0000000..ea89960
--- /dev/null
+++ b/lib/python2.7/lib-dynload/nis_failed.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/operator.so b/lib/python2.7/lib-dynload/operator.so
index 0cac67f..8f7efba 100755
--- a/lib/python2.7/lib-dynload/operator.so
+++ b/lib/python2.7/lib-dynload/operator.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/ossaudiodev.so b/lib/python2.7/lib-dynload/ossaudiodev.so
index 7e1c5ef..ce5f9ce 100755
--- a/lib/python2.7/lib-dynload/ossaudiodev.so
+++ b/lib/python2.7/lib-dynload/ossaudiodev.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/parser.so b/lib/python2.7/lib-dynload/parser.so
index cf228b9..2f28a84 100755
--- a/lib/python2.7/lib-dynload/parser.so
+++ b/lib/python2.7/lib-dynload/parser.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/pyexpat.so b/lib/python2.7/lib-dynload/pyexpat.so
index 6ab6f31..14a6d78 100755
--- a/lib/python2.7/lib-dynload/pyexpat.so
+++ b/lib/python2.7/lib-dynload/pyexpat.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/resource.so b/lib/python2.7/lib-dynload/resource.so
index 4fb237a..db8e7b6 100755
--- a/lib/python2.7/lib-dynload/resource.so
+++ b/lib/python2.7/lib-dynload/resource.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/select.so b/lib/python2.7/lib-dynload/select.so
index 0c25df1..682112d 100755
--- a/lib/python2.7/lib-dynload/select.so
+++ b/lib/python2.7/lib-dynload/select.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/spwd.so b/lib/python2.7/lib-dynload/spwd.so
index daf8bcb..5ce5e89 100755
--- a/lib/python2.7/lib-dynload/spwd.so
+++ b/lib/python2.7/lib-dynload/spwd.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/strop.so b/lib/python2.7/lib-dynload/strop.so
index e2ba938..d4ce176 100755
--- a/lib/python2.7/lib-dynload/strop.so
+++ b/lib/python2.7/lib-dynload/strop.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/syslog.so b/lib/python2.7/lib-dynload/syslog.so
index c1ca9a4..9db001d 100755
--- a/lib/python2.7/lib-dynload/syslog.so
+++ b/lib/python2.7/lib-dynload/syslog.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/termios.so b/lib/python2.7/lib-dynload/termios.so
index 306c954..55ca138 100755
--- a/lib/python2.7/lib-dynload/termios.so
+++ b/lib/python2.7/lib-dynload/termios.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/time.so b/lib/python2.7/lib-dynload/time.so
index 91c778d..f1df4fa 100755
--- a/lib/python2.7/lib-dynload/time.so
+++ b/lib/python2.7/lib-dynload/time.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/unicodedata.so b/lib/python2.7/lib-dynload/unicodedata.so
index 4be49d6..e9353df 100755
--- a/lib/python2.7/lib-dynload/unicodedata.so
+++ b/lib/python2.7/lib-dynload/unicodedata.so
Binary files differ
diff --git a/lib/python2.7/lib-dynload/zlib.so b/lib/python2.7/lib-dynload/zlib.so
deleted file mode 100755
index dae20f9..0000000
--- a/lib/python2.7/lib-dynload/zlib.so
+++ /dev/null
Binary files differ
diff --git a/lib/python2.7/lib-tk/FixTk.py b/lib/python2.7/lib-tk/FixTk.py
index 953dcd5..8af27b5 100644
--- a/lib/python2.7/lib-tk/FixTk.py
+++ b/lib/python2.7/lib-tk/FixTk.py
@@ -49,7 +49,10 @@
 prefix = os.path.join(sys.prefix,"tcl")
 if not os.path.exists(prefix):
     # devdir/externals/tcltk/lib
-    prefix = os.path.join(sys.prefix, "externals", "tcltk", "lib")
+    tcltk = 'tcltk'
+    if sys.maxsize > 2**31 - 1:
+        tcltk = 'tcltk64'
+    prefix = os.path.join(sys.prefix, "externals", tcltk, "lib")
     prefix = os.path.abspath(prefix)
 # if this does not exist, no further search is needed
 if os.path.exists(prefix):
diff --git a/lib/python2.7/lib-tk/Tkinter.py b/lib/python2.7/lib-tk/Tkinter.py
index cb7edf0..d8d5b80 100644
--- a/lib/python2.7/lib-tk/Tkinter.py
+++ b/lib/python2.7/lib-tk/Tkinter.py
@@ -1117,7 +1117,7 @@
 
         return self._bind(('bind', className), sequence, func, add, 0)
     def unbind_class(self, className, sequence):
-        """Unbind for a all widgets with bindtag CLASSNAME for event SEQUENCE
+        """Unbind for all widgets with bindtag CLASSNAME for event SEQUENCE
         all functions."""
         self.tk.call('bind', className , sequence, '')
     def mainloop(self, n=0):
@@ -1335,8 +1335,9 @@
         raise TypeError("Tkinter objects don't support 'in' tests.")
     def keys(self):
         """Return a list of all resource names of this widget."""
-        return [x[0][1:] for x in
-                self.tk.splitlist(self.tk.call(self._w, 'configure'))]
+        splitlist = self.tk.splitlist
+        return [splitlist(x)[0][1:] for x in
+                splitlist(self.tk.call(self._w, 'configure'))]
     def __str__(self):
         """Return the window path name of this widget."""
         return self._w
@@ -2486,7 +2487,7 @@
         self.tk.call(self._w, 'toggle')
 
 class Entry(Widget, XView):
-    """Entry widget which allows to display simple text."""
+    """Entry widget which allows displaying simple text."""
     def __init__(self, master=None, cnf={}, **kw):
         """Construct an entry widget with the parent MASTER.
 
@@ -2682,7 +2683,7 @@
     itemconfig = itemconfigure
 
 class Menu(Widget):
-    """Menu widget which allows to display menu bars, pull-down menus and pop-up menus."""
+    """Menu widget which allows displaying menu bars, pull-down menus and pop-up menus."""
     def __init__(self, master=None, cnf={}, **kw):
         """Construct menu widget with the parent MASTER.
 
@@ -3379,16 +3380,20 @@
         destImage = PhotoImage(master=self.tk)
         self.tk.call(destImage, 'copy', self.name)
         return destImage
-    def zoom(self,x,y=''):
+    def zoom(self, x, y=''):
         """Return a new PhotoImage with the same image as this widget
-        but zoom it with X and Y."""
+        but zoom it with a factor of x in the X direction and y in the Y
+        direction.  If y is not given, the default value is the same as x.
+        """
         destImage = PhotoImage(master=self.tk)
         if y=='': y=x
         self.tk.call(destImage, 'copy', self.name, '-zoom',x,y)
         return destImage
-    def subsample(self,x,y=''):
+    def subsample(self, x, y=''):
         """Return a new PhotoImage based on the same image as this widget
-        but use only every Xth or Yth pixel."""
+        but use only every Xth or Yth pixel.  If y is not given, the
+        default value is the same as x.
+        """
         destImage = PhotoImage(master=self.tk)
         if y=='': y=x
         self.tk.call(destImage, 'copy', self.name, '-subsample',x,y)
diff --git a/lib/python2.7/lib-tk/test/test_tkinter/test_font.py b/lib/python2.7/lib-tk/test/test_tkinter/test_font.py
index 3eecd32..4cbf82e 100644
--- a/lib/python2.7/lib-tk/test/test_tkinter/test_font.py
+++ b/lib/python2.7/lib-tk/test/test_tkinter/test_font.py
@@ -6,14 +6,54 @@
 
 requires('gui')
 
+fontname = "TkDefaultFont"
+
 class FontTest(AbstractTkTest, unittest.TestCase):
 
-    def test_font_eq(self):
-        fontname = "TkDefaultFont"
+    @classmethod
+    def setUpClass(cls):
+        AbstractTkTest.setUpClass.__func__(cls)
         try:
-            f = font.Font(root=self.root, name=fontname, exists=True)
-        except tkinter._tkinter.TclError:
-            f = font.Font(root=self.root, name=fontname, exists=False)
+            cls.font = font.Font(root=cls.root, name=fontname, exists=True)
+        except tkinter.TclError:
+            cls.font = font.Font(root=cls.root, name=fontname, exists=False)
+
+    def test_configure(self):
+        options = self.font.configure()
+        self.assertGreaterEqual(set(options),
+            {'family', 'size', 'weight', 'slant', 'underline', 'overstrike'})
+        for key in options:
+            self.assertEqual(self.font.cget(key), options[key])
+            self.assertEqual(self.font[key], options[key])
+        for key in 'family', 'weight', 'slant':
+            self.assertIsInstance(options[key], str)
+            self.assertIsInstance(self.font.cget(key), str)
+            self.assertIsInstance(self.font[key], str)
+        sizetype = int if self.wantobjects else str
+        for key in 'size', 'underline', 'overstrike':
+            self.assertIsInstance(options[key], sizetype)
+            self.assertIsInstance(self.font.cget(key), sizetype)
+            self.assertIsInstance(self.font[key], sizetype)
+
+    def test_actual(self):
+        options = self.font.actual()
+        self.assertGreaterEqual(set(options),
+            {'family', 'size', 'weight', 'slant', 'underline', 'overstrike'})
+        for key in options:
+            self.assertEqual(self.font.actual(key), options[key])
+        for key in 'family', 'weight', 'slant':
+            self.assertIsInstance(options[key], str)
+            self.assertIsInstance(self.font.actual(key), str)
+        sizetype = int if self.wantobjects else str
+        for key in 'size', 'underline', 'overstrike':
+            self.assertIsInstance(options[key], sizetype)
+            self.assertIsInstance(self.font.actual(key), sizetype)
+
+    def test_name(self):
+        self.assertEqual(self.font.name, fontname)
+        self.assertEqual(str(self.font), fontname)
+
+    def test_eq(self):
         font1 = font.Font(root=self.root, name=fontname, exists=True)
         font2 = font.Font(root=self.root, name=fontname, exists=True)
         self.assertIsNot(font1, font2)
@@ -22,6 +62,35 @@
         self.assertNotEqual(font1, 0)
         self.assertNotIn(font1, [0])
 
+    def test_measure(self):
+        self.assertIsInstance(self.font.measure('abc'), int)
+
+    def test_metrics(self):
+        metrics = self.font.metrics()
+        self.assertGreaterEqual(set(metrics),
+            {'ascent', 'descent', 'linespace', 'fixed'})
+        for key in metrics:
+            self.assertEqual(self.font.metrics(key), metrics[key])
+            self.assertIsInstance(metrics[key], int)
+            self.assertIsInstance(self.font.metrics(key), int)
+
+    def test_families(self):
+        families = font.families(self.root)
+        self.assertIsInstance(families, tuple)
+        self.assertTrue(families)
+        for family in families:
+            self.assertIsInstance(family, (str, unicode))
+            self.assertTrue(family)
+
+    def test_names(self):
+        names = font.names(self.root)
+        self.assertIsInstance(names, tuple)
+        self.assertTrue(names)
+        for name in names:
+            self.assertIsInstance(name, (str, unicode))
+            self.assertTrue(name)
+        self.assertIn(fontname, names)
+
 tests_gui = (FontTest, )
 
 if __name__ == "__main__":
diff --git a/lib/python2.7/lib-tk/test/test_tkinter/test_geometry_managers.py b/lib/python2.7/lib-tk/test/test_tkinter/test_geometry_managers.py
index 78c1f02..941fb31 100644
--- a/lib/python2.7/lib-tk/test/test_tkinter/test_geometry_managers.py
+++ b/lib/python2.7/lib-tk/test/test_tkinter/test_geometry_managers.py
@@ -12,6 +12,8 @@
 
 class PackTest(AbstractWidgetTest, unittest.TestCase):
 
+    test_keys = None
+
     def create2(self):
         pack = tkinter.Toplevel(self.root, name='pack')
         pack.wm_geometry('300x200+0+0')
@@ -276,6 +278,8 @@
 
 class PlaceTest(AbstractWidgetTest, unittest.TestCase):
 
+    test_keys = None
+
     def create2(self):
         t = tkinter.Toplevel(self.root, width=300, height=200, bd=0)
         t.wm_geometry('300x200+0+0')
@@ -478,6 +482,8 @@
 
 class GridTest(AbstractWidgetTest, unittest.TestCase):
 
+    test_keys = None
+
     def tearDown(self):
         cols, rows = self.root.grid_size()
         for i in range(cols + 1):
diff --git a/lib/python2.7/lib-tk/test/test_tkinter/test_widgets.py b/lib/python2.7/lib-tk/test/test_tkinter/test_widgets.py
index 1916e34..4da3096 100644
--- a/lib/python2.7/lib-tk/test/test_tkinter/test_widgets.py
+++ b/lib/python2.7/lib-tk/test/test_tkinter/test_widgets.py
@@ -99,7 +99,7 @@
         'background', 'borderwidth',
         'class', 'colormap', 'container', 'cursor', 'height',
         'highlightbackground', 'highlightcolor', 'highlightthickness',
-        'relief', 'takefocus', 'visual', 'width',
+        'padx', 'pady', 'relief', 'takefocus', 'visual', 'width',
     )
 
     def create(self, **kwargs):
@@ -633,7 +633,7 @@
         'highlightbackground', 'highlightcolor', 'highlightthickness',
         'insertbackground', 'insertborderwidth',
         'insertofftime', 'insertontime', 'insertwidth',
-        'relief', 'scrollregion',
+        'offset', 'relief', 'scrollregion',
         'selectbackground', 'selectborderwidth', 'selectforeground',
         'state', 'takefocus',
         'xscrollcommand', 'xscrollincrement',
@@ -655,6 +655,15 @@
         widget = self.create()
         self.checkBooleanParam(widget, 'confine')
 
+    def test_offset(self):
+        widget = self.create()
+        self.assertEqual(widget['offset'], '0,0')
+        self.checkParams(widget, 'offset',
+                'n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw', 'center')
+        self.checkParam(widget, 'offset', '10,20')
+        self.checkParam(widget, 'offset', '#5,6')
+        self.checkInvalidParam(widget, 'offset', 'spam')
+
     def test_scrollregion(self):
         widget = self.create()
         self.checkParam(widget, 'scrollregion', '0 0 200 150')
diff --git a/lib/python2.7/lib-tk/test/test_ttk/support.py b/lib/python2.7/lib-tk/test/test_ttk/support.py
index 91795d9..c4d842a 100644
--- a/lib/python2.7/lib-tk/test/test_ttk/support.py
+++ b/lib/python2.7/lib-tk/test/test_ttk/support.py
@@ -23,7 +23,7 @@
     def tearDownClass(cls):
         cls.root.update_idletasks()
         cls.root.destroy()
-        cls.root = None
+        del cls.root
         tkinter._default_root = None
         tkinter._support_default_root = cls._old_support_default_root
 
diff --git a/lib/python2.7/lib-tk/test/test_ttk/test_widgets.py b/lib/python2.7/lib-tk/test/test_ttk/test_widgets.py
index 71de83f..f874a9c 100644
--- a/lib/python2.7/lib-tk/test/test_ttk/test_widgets.py
+++ b/lib/python2.7/lib-tk/test/test_ttk/test_widgets.py
@@ -189,7 +189,7 @@
 @add_standard_options(StandardTtkOptionsTests)
 class LabelTest(AbstractLabelTest, unittest.TestCase):
     OPTIONS = (
-        'anchor', 'background',
+        'anchor', 'background', 'borderwidth',
         'class', 'compound', 'cursor', 'font', 'foreground',
         'image', 'justify', 'padding', 'relief', 'state', 'style',
         'takefocus', 'text', 'textvariable',
@@ -210,7 +210,8 @@
 class ButtonTest(AbstractLabelTest, unittest.TestCase):
     OPTIONS = (
         'class', 'command', 'compound', 'cursor', 'default',
-        'image', 'state', 'style', 'takefocus', 'text', 'textvariable',
+        'image', 'padding', 'state', 'style',
+        'takefocus', 'text', 'textvariable',
         'underline', 'width',
     )
 
@@ -234,7 +235,7 @@
         'class', 'command', 'compound', 'cursor',
         'image',
         'offvalue', 'onvalue',
-        'state', 'style',
+        'padding', 'state', 'style',
         'takefocus', 'text', 'textvariable',
         'underline', 'variable', 'width',
     )
@@ -278,136 +279,10 @@
 
 
 @add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
-class ComboboxTest(AbstractWidgetTest, unittest.TestCase):
-    OPTIONS = (
-        'class', 'cursor', 'exportselection', 'height',
-        'justify', 'postcommand', 'state', 'style',
-        'takefocus', 'textvariable', 'values', 'width',
-    )
-
-    def setUp(self):
-        super(ComboboxTest, self).setUp()
-        self.combo = self.create()
-
-    def create(self, **kwargs):
-        return ttk.Combobox(self.root, **kwargs)
-
-    def test_height(self):
-        widget = self.create()
-        self.checkParams(widget, 'height', 100, 101.2, 102.6, -100, 0, '1i')
-
-    def test_state(self):
-        widget = self.create()
-        self.checkParams(widget, 'state', 'active', 'disabled', 'normal')
-
-    def _show_drop_down_listbox(self):
-        width = self.combo.winfo_width()
-        self.combo.event_generate('<ButtonPress-1>', x=width - 5, y=5)
-        self.combo.event_generate('<ButtonRelease-1>', x=width - 5, y=5)
-        self.combo.update_idletasks()
-
-
-    def test_virtual_event(self):
-        success = []
-
-        self.combo['values'] = [1]
-        self.combo.bind('<<ComboboxSelected>>',
-            lambda evt: success.append(True))
-        self.combo.pack()
-        self.combo.wait_visibility()
-
-        height = self.combo.winfo_height()
-        self._show_drop_down_listbox()
-        self.combo.update()
-        self.combo.event_generate('<Return>')
-        self.combo.update()
-
-        self.assertTrue(success)
-
-
-    def test_postcommand(self):
-        success = []
-
-        self.combo['postcommand'] = lambda: success.append(True)
-        self.combo.pack()
-        self.combo.wait_visibility()
-
-        self._show_drop_down_listbox()
-        self.assertTrue(success)
-
-        # testing postcommand removal
-        self.combo['postcommand'] = ''
-        self._show_drop_down_listbox()
-        self.assertEqual(len(success), 1)
-
-
-    def test_values(self):
-        def check_get_current(getval, currval):
-            self.assertEqual(self.combo.get(), getval)
-            self.assertEqual(self.combo.current(), currval)
-
-        self.assertEqual(self.combo['values'],
-                         () if tcl_version < (8, 5) else '')
-        check_get_current('', -1)
-
-        self.checkParam(self.combo, 'values', 'mon tue wed thur',
-                        expected=('mon', 'tue', 'wed', 'thur'))
-        self.checkParam(self.combo, 'values', ('mon', 'tue', 'wed', 'thur'))
-        self.checkParam(self.combo, 'values', (42, 3.14, '', 'any string'))
-        self.checkParam(self.combo, 'values', () if tcl_version < (8, 5) else '')
-
-        self.combo['values'] = ['a', 1, 'c']
-
-        self.combo.set('c')
-        check_get_current('c', 2)
-
-        self.combo.current(0)
-        check_get_current('a', 0)
-
-        self.combo.set('d')
-        check_get_current('d', -1)
-
-        # testing values with empty string
-        self.combo.set('')
-        self.combo['values'] = (1, 2, '', 3)
-        check_get_current('', 2)
-
-        # testing values with empty string set through configure
-        self.combo.configure(values=[1, '', 2])
-        self.assertEqual(self.combo['values'],
-                         ('1', '', '2') if self.wantobjects else
-                         '1 {} 2')
-
-        # testing values with spaces
-        self.combo['values'] = ['a b', 'a\tb', 'a\nb']
-        self.assertEqual(self.combo['values'],
-                         ('a b', 'a\tb', 'a\nb') if self.wantobjects else
-                         '{a b} {a\tb} {a\nb}')
-
-        # testing values with special characters
-        self.combo['values'] = [r'a\tb', '"a"', '} {']
-        self.assertEqual(self.combo['values'],
-                         (r'a\tb', '"a"', '} {') if self.wantobjects else
-                         r'a\\tb {"a"} \}\ \{')
-
-        # out of range
-        self.assertRaises(tkinter.TclError, self.combo.current,
-            len(self.combo['values']))
-        # it expects an integer (or something that can be converted to int)
-        self.assertRaises(tkinter.TclError, self.combo.current, '')
-
-        # testing creating combobox with empty string in values
-        combo2 = ttk.Combobox(self.root, values=[1, 2, ''])
-        self.assertEqual(combo2['values'],
-                         ('1', '2', '') if self.wantobjects else '1 2 {}')
-        combo2.destroy()
-
-
-@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
 class EntryTest(AbstractWidgetTest, unittest.TestCase):
     OPTIONS = (
         'background', 'class', 'cursor',
-        'exportselection', 'font',
+        'exportselection', 'font', 'foreground',
         'invalidcommand', 'justify',
         'show', 'state', 'style', 'takefocus', 'textvariable',
         'validate', 'validatecommand', 'width', 'xscrollcommand',
@@ -536,6 +411,131 @@
 
 
 @add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
+class ComboboxTest(EntryTest, unittest.TestCase):
+    OPTIONS = (
+        'background', 'class', 'cursor', 'exportselection',
+        'font', 'foreground', 'height', 'invalidcommand',
+        'justify', 'postcommand', 'show', 'state', 'style',
+        'takefocus', 'textvariable',
+        'validate', 'validatecommand', 'values',
+        'width', 'xscrollcommand',
+    )
+
+    def setUp(self):
+        super(ComboboxTest, self).setUp()
+        self.combo = self.create()
+
+    def create(self, **kwargs):
+        return ttk.Combobox(self.root, **kwargs)
+
+    def test_height(self):
+        widget = self.create()
+        self.checkParams(widget, 'height', 100, 101.2, 102.6, -100, 0, '1i')
+
+    def _show_drop_down_listbox(self):
+        width = self.combo.winfo_width()
+        self.combo.event_generate('<ButtonPress-1>', x=width - 5, y=5)
+        self.combo.event_generate('<ButtonRelease-1>', x=width - 5, y=5)
+        self.combo.update_idletasks()
+
+
+    def test_virtual_event(self):
+        success = []
+
+        self.combo['values'] = [1]
+        self.combo.bind('<<ComboboxSelected>>',
+            lambda evt: success.append(True))
+        self.combo.pack()
+        self.combo.wait_visibility()
+
+        height = self.combo.winfo_height()
+        self._show_drop_down_listbox()
+        self.combo.update()
+        self.combo.event_generate('<Return>')
+        self.combo.update()
+
+        self.assertTrue(success)
+
+
+    def test_postcommand(self):
+        success = []
+
+        self.combo['postcommand'] = lambda: success.append(True)
+        self.combo.pack()
+        self.combo.wait_visibility()
+
+        self._show_drop_down_listbox()
+        self.assertTrue(success)
+
+        # testing postcommand removal
+        self.combo['postcommand'] = ''
+        self._show_drop_down_listbox()
+        self.assertEqual(len(success), 1)
+
+
+    def test_values(self):
+        def check_get_current(getval, currval):
+            self.assertEqual(self.combo.get(), getval)
+            self.assertEqual(self.combo.current(), currval)
+
+        self.assertEqual(self.combo['values'],
+                         () if tcl_version < (8, 5) else '')
+        check_get_current('', -1)
+
+        self.checkParam(self.combo, 'values', 'mon tue wed thur',
+                        expected=('mon', 'tue', 'wed', 'thur'))
+        self.checkParam(self.combo, 'values', ('mon', 'tue', 'wed', 'thur'))
+        self.checkParam(self.combo, 'values', (42, 3.14, '', 'any string'))
+        self.checkParam(self.combo, 'values', () if tcl_version < (8, 5) else '')
+
+        self.combo['values'] = ['a', 1, 'c']
+
+        self.combo.set('c')
+        check_get_current('c', 2)
+
+        self.combo.current(0)
+        check_get_current('a', 0)
+
+        self.combo.set('d')
+        check_get_current('d', -1)
+
+        # testing values with empty string
+        self.combo.set('')
+        self.combo['values'] = (1, 2, '', 3)
+        check_get_current('', 2)
+
+        # testing values with empty string set through configure
+        self.combo.configure(values=[1, '', 2])
+        self.assertEqual(self.combo['values'],
+                         ('1', '', '2') if self.wantobjects else
+                         '1 {} 2')
+
+        # testing values with spaces
+        self.combo['values'] = ['a b', 'a\tb', 'a\nb']
+        self.assertEqual(self.combo['values'],
+                         ('a b', 'a\tb', 'a\nb') if self.wantobjects else
+                         '{a b} {a\tb} {a\nb}')
+
+        # testing values with special characters
+        self.combo['values'] = [r'a\tb', '"a"', '} {']
+        self.assertEqual(self.combo['values'],
+                         (r'a\tb', '"a"', '} {') if self.wantobjects else
+                         r'a\\tb {"a"} \}\ \{')
+
+        # out of range
+        self.assertRaises(tkinter.TclError, self.combo.current,
+            len(self.combo['values']))
+        # it expects an integer (or something that can be converted to int)
+        self.assertRaises(tkinter.TclError, self.combo.current, '')
+
+        # testing creating combobox with empty string in values
+        combo2 = ttk.Combobox(self.root, values=[1, 2, ''])
+        self.assertEqual(combo2['values'],
+                         ('1', '2', '') if self.wantobjects else '1 2 {}')
+        combo2.destroy()
+
+
+@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
 class PanedWindowTest(AbstractWidgetTest, unittest.TestCase):
     OPTIONS = (
         'class', 'cursor', 'height',
@@ -675,7 +675,7 @@
     OPTIONS = (
         'class', 'command', 'compound', 'cursor',
         'image',
-        'state', 'style',
+        'padding', 'state', 'style',
         'takefocus', 'text', 'textvariable',
         'underline', 'value', 'variable', 'width',
     )
@@ -725,7 +725,7 @@
 class MenubuttonTest(AbstractLabelTest, unittest.TestCase):
     OPTIONS = (
         'class', 'compound', 'cursor', 'direction',
-        'image', 'menu', 'state', 'style',
+        'image', 'menu', 'padding', 'state', 'style',
         'takefocus', 'text', 'textvariable',
         'underline', 'width',
     )
@@ -903,7 +903,7 @@
 @add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
 class NotebookTest(AbstractWidgetTest, unittest.TestCase):
     OPTIONS = (
-        'class', 'cursor', 'height', 'padding', 'style', 'takefocus',
+        'class', 'cursor', 'height', 'padding', 'style', 'takefocus', 'width',
     )
 
     def setUp(self):
diff --git a/lib/python2.7/lib-tk/test/widget_tests.py b/lib/python2.7/lib-tk/test/widget_tests.py
index 1b24324..33f716f 100644
--- a/lib/python2.7/lib-tk/test/widget_tests.py
+++ b/lib/python2.7/lib-tk/test/widget_tests.py
@@ -224,6 +224,32 @@
                 self.fail('Invalid bounding box: %r' % (bbox,))
                 break
 
+    def test_keys(self):
+        widget = self.create()
+        keys = widget.keys()
+        # XXX
+        if not isinstance(widget, Scale):
+            self.assertEqual(sorted(keys), sorted(widget.configure()))
+        for k in keys:
+            widget[k]
+        # Test if OPTIONS contains all keys
+        if test.test_support.verbose:
+            aliases = {
+                'bd': 'borderwidth',
+                'bg': 'background',
+                'fg': 'foreground',
+                'invcmd': 'invalidcommand',
+                'vcmd': 'validatecommand',
+            }
+            keys = set(keys)
+            expected = set(self.OPTIONS)
+            for k in sorted(keys - expected):
+                if not (k in aliases and
+                        aliases[k] in keys and
+                        aliases[k] in expected):
+                    print('%s.OPTIONS doesn\'t contain "%s"' %
+                          (self.__class__.__name__, k))
+
 
 class StandardOptionsTests(object):
     STANDARD_OPTIONS = (
diff --git a/lib/python2.7/lib2to3/Grammar2.7.10.final.0.pickle b/lib/python2.7/lib2to3/Grammar2.7.11.final.0.pickle
similarity index 88%
rename from lib/python2.7/lib2to3/Grammar2.7.10.final.0.pickle
rename to lib/python2.7/lib2to3/Grammar2.7.11.final.0.pickle
index 4bb7978..090da85 100644
--- a/lib/python2.7/lib2to3/Grammar2.7.10.final.0.pickle
+++ b/lib/python2.7/lib2to3/Grammar2.7.11.final.0.pickle
Binary files differ
diff --git a/lib/python2.7/lib2to3/PatternGrammar2.7.10.final.0.pickle b/lib/python2.7/lib2to3/PatternGrammar2.7.11.final.0.pickle
similarity index 68%
rename from lib/python2.7/lib2to3/PatternGrammar2.7.10.final.0.pickle
rename to lib/python2.7/lib2to3/PatternGrammar2.7.11.final.0.pickle
index 2c86675..dd783b1 100644
--- a/lib/python2.7/lib2to3/PatternGrammar2.7.10.final.0.pickle
+++ b/lib/python2.7/lib2to3/PatternGrammar2.7.11.final.0.pickle
Binary files differ
diff --git a/lib/python2.7/lib2to3/fixes/fix_metaclass.py b/lib/python2.7/lib2to3/fixes/fix_metaclass.py
index 4f5593c..8399a13 100644
--- a/lib/python2.7/lib2to3/fixes/fix_metaclass.py
+++ b/lib/python2.7/lib2to3/fixes/fix_metaclass.py
@@ -114,7 +114,7 @@
                 left_node = expr_node.children[0]
                 if isinstance(left_node, Leaf) and \
                         left_node.value == u'__metaclass__':
-                    # We found a assignment to __metaclass__.
+                    # We found an assignment to __metaclass__.
                     fixup_simple_stmt(node, i, simple_node)
                     remove_trailing_newline(simple_node)
                     yield (node, i, simple_node)
diff --git a/lib/python2.7/lib2to3/fixes/fix_types.py b/lib/python2.7/lib2to3/fixes/fix_types.py
index fc9d495..baaeabd 100644
--- a/lib/python2.7/lib2to3/fixes/fix_types.py
+++ b/lib/python2.7/lib2to3/fixes/fix_types.py
@@ -42,7 +42,7 @@
         'NotImplementedType' : 'type(NotImplemented)',
         'SliceType' : 'slice',
         'StringType': 'bytes', # XXX ?
-        'StringTypes' : 'str', # XXX ?
+        'StringTypes' : '(str,)', # XXX ?
         'TupleType': 'tuple',
         'TypeType' : 'type',
         'UnicodeType': 'str',
diff --git a/lib/python2.7/lib2to3/refactor.py b/lib/python2.7/lib2to3/refactor.py
index a4c168d..bd23897 100644
--- a/lib/python2.7/lib2to3/refactor.py
+++ b/lib/python2.7/lib2to3/refactor.py
@@ -255,7 +255,7 @@
             fixer = fix_class(self.options, self.fixer_log)
             if fixer.explicit and self.explicit is not True and \
                     fix_mod_path not in self.explicit:
-                self.log_message("Skipping implicit fixer: %s", fix_name)
+                self.log_message("Skipping optional fixer: %s", fix_name)
                 continue
 
             self.log_debug("Adding transformation: %s", fix_name)
diff --git a/lib/python2.7/lib2to3/tests/test_fixers.py b/lib/python2.7/lib2to3/tests/test_fixers.py
index 7db9af2..6fa603f 100644
--- a/lib/python2.7/lib2to3/tests/test_fixers.py
+++ b/lib/python2.7/lib2to3/tests/test_fixers.py
@@ -3263,6 +3263,10 @@
         a = """type(None)"""
         self.check(b, a)
 
+        b = "types.StringTypes"
+        a = "(str,)"
+        self.check(b, a)
+
 class Test_idioms(FixerTestCase):
     fixer = "idioms"
 
diff --git a/lib/python2.7/locale.py b/lib/python2.7/locale.py
index 15c53ba..f547bab 100644
--- a/lib/python2.7/locale.py
+++ b/lib/python2.7/locale.py
@@ -18,6 +18,10 @@
 import operator
 import functools
 
+# keep a copy of the builtin str type, because 'str' name is overriden
+# in globals by a function below
+_str = str
+
 try:
     _unicode = unicode
 except NameError:
@@ -573,7 +577,7 @@
         category may be given as one of the LC_* values.
 
     """
-    if locale and type(locale) is not type(""):
+    if locale and not isinstance(locale, (_str, _unicode)):
         # convert to string
         locale = normalize(_build_localename(locale))
     return _setlocale(category, locale)
diff --git a/lib/python2.7/logging/__init__.py b/lib/python2.7/logging/__init__.py
index bd4afeb..caf151d 100644
--- a/lib/python2.7/logging/__init__.py
+++ b/lib/python2.7/logging/__init__.py
@@ -59,18 +59,6 @@
 except NameError:
     _unicode = False
 
-#
-# _srcfile is used when walking the stack to check when we've got the first
-# caller stack frame.
-#
-if hasattr(sys, 'frozen'): #support for py2exe
-    _srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
-elif __file__[-4:].lower() in ['.pyc', '.pyo']:
-    _srcfile = __file__[:-4] + '.py'
-else:
-    _srcfile = __file__
-_srcfile = os.path.normcase(_srcfile)
-
 # next bit filched from 1.5.2's inspect.py
 def currentframe():
     """Return the frame object for the caller's stack frame."""
@@ -82,6 +70,12 @@
 if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
 # done filching
 
+#
+# _srcfile is used when walking the stack to check when we've got the first
+# caller stack frame.
+#
+_srcfile = os.path.normcase(currentframe.__code__.co_filename)
+
 # _srcfile is only used in conjunction with sys._getframe().
 # To provide compatibility with older versions of Python, set _srcfile
 # to None if _getframe() is not available; this value will prevent
@@ -471,7 +465,15 @@
         record.message = record.getMessage()
         if self.usesTime():
             record.asctime = self.formatTime(record, self.datefmt)
-        s = self._fmt % record.__dict__
+        try:
+            s = self._fmt % record.__dict__
+        except UnicodeDecodeError as e:
+            # Issue 25664. The logger name may be Unicode. Try again ...
+            try:
+                record.name = record.name.decode('utf-8')
+                s = self._fmt % record.__dict__
+            except UnicodeDecodeError:
+                raise e
         if record.exc_info:
             # Cache the traceback text to avoid converting it multiple times
             # (it's constant anyway)
diff --git a/lib/python2.7/mhlib.py b/lib/python2.7/mhlib.py
index 856e878..46311fc 100644
--- a/lib/python2.7/mhlib.py
+++ b/lib/python2.7/mhlib.py
@@ -159,7 +159,7 @@
         # Get the link count so we can avoid listing folders
         # that have no subfolders.
         nlinks = os.stat(fullname).st_nlink
-        if nlinks <= 2:
+        if nlinks == 2:
             return []
         subfolders = []
         subnames = os.listdir(fullname)
@@ -171,7 +171,7 @@
                 # Stop looking for subfolders when
                 # we've seen them all
                 nlinks = nlinks - 1
-                if nlinks <= 2:
+                if nlinks == 2:
                     break
         subfolders.sort()
         return subfolders
@@ -186,7 +186,7 @@
         # Get the link count so we can avoid listing folders
         # that have no subfolders.
         nlinks = os.stat(fullname).st_nlink
-        if nlinks <= 2:
+        if nlinks == 2:
             return []
         subfolders = []
         subnames = os.listdir(fullname)
@@ -203,7 +203,7 @@
                 # Stop looking for subfolders when
                 # we've seen them all
                 nlinks = nlinks - 1
-                if nlinks <= 2:
+                if nlinks == 2:
                     break
         subfolders.sort()
         return subfolders
diff --git a/lib/python2.7/multiprocessing/forking.py b/lib/python2.7/multiprocessing/forking.py
index 6bddfb7..d393817 100644
--- a/lib/python2.7/multiprocessing/forking.py
+++ b/lib/python2.7/multiprocessing/forking.py
@@ -470,12 +470,26 @@
         process.ORIGINAL_DIR = data['orig_dir']
 
     if 'main_path' in data:
+        # XXX (ncoghlan): The following code makes several bogus
+        # assumptions regarding the relationship between __file__
+        # and a module's real name. See PEP 302 and issue #10845
+        # The problem is resolved properly in Python 3.4+, as
+        # described in issue #19946
+
         main_path = data['main_path']
         main_name = os.path.splitext(os.path.basename(main_path))[0]
         if main_name == '__init__':
             main_name = os.path.basename(os.path.dirname(main_path))
 
-        if main_name != 'ipython':
+        if main_name == '__main__':
+            # For directory and zipfile execution, we assume an implicit
+            # "if __name__ == '__main__':" around the module, and don't
+            # rerun the main module code in spawned processes
+            main_module = sys.modules['__main__']
+            main_module.__file__ = main_path
+        elif main_name != 'ipython':
+            # Main modules not actually called __main__.py may
+            # contain additional code that should still be executed
             import imp
 
             if main_path is None:
diff --git a/lib/python2.7/nturl2path.py b/lib/python2.7/nturl2path.py
index 10ea272..9e6eb0d 100644
--- a/lib/python2.7/nturl2path.py
+++ b/lib/python2.7/nturl2path.py
@@ -4,9 +4,11 @@
     """OS-specific conversion from a relative URL of the 'file' scheme
     to a file system path; not recommended for general use."""
     # e.g.
-    # ///C|/foo/bar/spam.foo
-    # becomes
-    # C:\foo\bar\spam.foo
+    #   ///C|/foo/bar/spam.foo
+    # and
+    #   ///C:/foo/bar/spam.foo
+    # become
+    #   C:\foo\bar\spam.foo
     import string, urllib
     # Windows itself uses ":" even in URLs.
     url = url.replace(':', '|')
@@ -39,9 +41,9 @@
     """OS-specific conversion from a file system path to a relative URL
     of the 'file' scheme; not recommended for general use."""
     # e.g.
-    # C:\foo\bar\spam.foo
+    #   C:\foo\bar\spam.foo
     # becomes
-    # ///C|/foo/bar/spam.foo
+    #   ///C:/foo/bar/spam.foo
     import urllib
     if not ':' in p:
         # No drive specifier, just convert slashes and quote the name
diff --git a/lib/python2.7/pdb.py b/lib/python2.7/pdb.py
index 113b4e0..4d35103 100755
--- a/lib/python2.7/pdb.py
+++ b/lib/python2.7/pdb.py
@@ -1322,6 +1322,9 @@
             # In most cases SystemExit does not warrant a post-mortem session.
             print "The program exited via sys.exit(). Exit status: ",
             print sys.exc_info()[1]
+        except SyntaxError:
+            traceback.print_exc()
+            sys.exit(1)
         except:
             traceback.print_exc()
             print "Uncaught exception. Entering post mortem debugging"
diff --git a/lib/python2.7/pickle.py b/lib/python2.7/pickle.py
index 299de16..1b3196f 100644
--- a/lib/python2.7/pickle.py
+++ b/lib/python2.7/pickle.py
@@ -402,7 +402,13 @@
             write(REDUCE)
 
         if obj is not None:
-            self.memoize(obj)
+            # If the object is already in the memo, this means it is
+            # recursive. In this case, throw away everything we put on the
+            # stack, and fetch the object back from the memo.
+            if id(obj) in self.memo:
+                write(POP + self.get(self.memo[id(obj)][0]))
+            else:
+                self.memoize(obj)
 
         # More new special cases (that work with older protocols as
         # well): when __reduce__ returns a tuple with 4 or 5 items,
diff --git a/lib/python2.7/platform.py b/lib/python2.7/platform.py
index df2af83..3cf2303 100755
--- a/lib/python2.7/platform.py
+++ b/lib/python2.7/platform.py
@@ -28,12 +28,14 @@
 #      Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
 #      Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark
 #      Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),
-#      Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter
+#      Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter, Steve
+#      Dower
 #
 #    History:
 #
 #    <see CVS and SVN checkin messages for history>
 #
+#    1.0.8 - changed Windows support to read version from kernel32.dll
 #    1.0.7 - added DEV_NULL
 #    1.0.6 - added linux_distribution()
 #    1.0.5 - fixed Java support to allow running the module on Jython
@@ -531,189 +533,140 @@
         version = _norm_version(version)
     return system,release,version
 
-def _win32_getvalue(key,name,default=''):
+_WIN32_CLIENT_RELEASES = {
+    (5, 0): "2000",
+    (5, 1): "XP",
+    # Strictly, 5.2 client is XP 64-bit, but platform.py historically
+    # has always called it 2003 Server
+    (5, 2): "2003Server",
+    (5, None): "post2003",
 
-    """ Read a value for name from the registry key.
+    (6, 0): "Vista",
+    (6, 1): "7",
+    (6, 2): "8",
+    (6, 3): "8.1",
+    (6, None): "post8.1",
 
-        In case this fails, default is returned.
+    (10, 0): "10",
+    (10, None): "post10",
+}
 
-    """
+# Server release name lookup will default to client names if necessary
+_WIN32_SERVER_RELEASES = {
+    (5, 2): "2003Server",
+
+    (6, 0): "2008Server",
+    (6, 1): "2008ServerR2",
+    (6, 2): "2012Server",
+    (6, 3): "2012ServerR2",
+    (6, None): "post2012ServerR2",
+}
+
+def _get_real_winver(maj, min, build):
+    if maj < 6 or (maj == 6 and min < 2):
+        return maj, min, build
+
+    from ctypes import (c_buffer, POINTER, byref, create_unicode_buffer,
+                        Structure, WinDLL)
+    from ctypes.wintypes import DWORD, HANDLE
+
+    class VS_FIXEDFILEINFO(Structure):
+        _fields_ = [
+            ("dwSignature", DWORD),
+            ("dwStrucVersion", DWORD),
+            ("dwFileVersionMS", DWORD),
+            ("dwFileVersionLS", DWORD),
+            ("dwProductVersionMS", DWORD),
+            ("dwProductVersionLS", DWORD),
+            ("dwFileFlagsMask", DWORD),
+            ("dwFileFlags", DWORD),
+            ("dwFileOS", DWORD),
+            ("dwFileType", DWORD),
+            ("dwFileSubtype", DWORD),
+            ("dwFileDateMS", DWORD),
+            ("dwFileDateLS", DWORD),
+        ]
+
+    kernel32 = WinDLL('kernel32')
+    version = WinDLL('version')
+
+    # We will immediately double the length up to MAX_PATH, but the
+    # path may be longer, so we retry until the returned string is
+    # shorter than our buffer.
+    name_len = actual_len = 130
+    while actual_len == name_len:
+        name_len *= 2
+        name = create_unicode_buffer(name_len)
+        actual_len = kernel32.GetModuleFileNameW(HANDLE(kernel32._handle),
+                                                 name, len(name))
+        if not actual_len:
+            return maj, min, build
+
+    size = version.GetFileVersionInfoSizeW(name, None)
+    if not size:
+        return maj, min, build
+
+    ver_block = c_buffer(size)
+    if (not version.GetFileVersionInfoW(name, None, size, ver_block) or
+        not ver_block):
+        return maj, min, build
+
+    pvi = POINTER(VS_FIXEDFILEINFO)()
+    if not version.VerQueryValueW(ver_block, "", byref(pvi), byref(DWORD())):
+        return maj, min, build
+
+    maj = pvi.contents.dwProductVersionMS >> 16
+    min = pvi.contents.dwProductVersionMS & 0xFFFF
+    build = pvi.contents.dwProductVersionLS >> 16
+
+    return maj, min, build
+
+def win32_ver(release='', version='', csd='', ptype=''):
     try:
-        # Use win32api if available
-        from win32api import RegQueryValueEx
+        from sys import getwindowsversion
     except ImportError:
-        # On Python 2.0 and later, emulate using _winreg
-        import _winreg
-        RegQueryValueEx = _winreg.QueryValueEx
+        return release, version, csd, ptype
     try:
-        return RegQueryValueEx(key,name)
-    except:
-        return default
-
-def win32_ver(release='',version='',csd='',ptype=''):
-
-    """ Get additional version information from the Windows Registry
-        and return a tuple (version,csd,ptype) referring to version
-        number, CSD level (service pack), and OS type (multi/single
-        processor).
-
-        As a hint: ptype returns 'Uniprocessor Free' on single
-        processor NT machines and 'Multiprocessor Free' on multi
-        processor machines. The 'Free' refers to the OS version being
-        free of debugging code. It could also state 'Checked' which
-        means the OS version uses debugging code, i.e. code that
-        checks arguments, ranges, etc. (Thomas Heller).
-
-        Note: this function works best with Mark Hammond's win32
-        package installed, but also on Python 2.3 and later. It
-        obviously only runs on Win32 compatible platforms.
-
-    """
-    # XXX Is there any way to find out the processor type on WinXX ?
-    # XXX Is win32 available on Windows CE ?
-    #
-    # Adapted from code posted by Karl Putland to comp.lang.python.
-    #
-    # The mappings between reg. values and release names can be found
-    # here: http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
-
-    # Import the needed APIs
-    try:
-        import win32api
-        from win32api import RegQueryValueEx, RegOpenKeyEx, \
-             RegCloseKey, GetVersionEx
-        from win32con import HKEY_LOCAL_MACHINE, VER_PLATFORM_WIN32_NT, \
-             VER_PLATFORM_WIN32_WINDOWS, VER_NT_WORKSTATION
+        from winreg import OpenKeyEx, QueryValueEx, CloseKey, HKEY_LOCAL_MACHINE
     except ImportError:
-        # Emulate the win32api module using Python APIs
+        from _winreg import OpenKeyEx, QueryValueEx, CloseKey, HKEY_LOCAL_MACHINE
+
+    winver = getwindowsversion()
+    maj, min, build = _get_real_winver(*winver[:3])
+    version = '{0}.{1}.{2}'.format(maj, min, build)
+
+    release = (_WIN32_CLIENT_RELEASES.get((maj, min)) or
+               _WIN32_CLIENT_RELEASES.get((maj, None)) or
+               release)
+
+    # getwindowsversion() reflect the compatibility mode Python is
+    # running under, and so the service pack value is only going to be
+    # valid if the versions match.
+    if winver[:2] == (maj, min):
         try:
-            sys.getwindowsversion
+            csd = 'SP{}'.format(winver.service_pack_major)
         except AttributeError:
-            # No emulation possible, so return the defaults...
-            return release,version,csd,ptype
-        else:
-            # Emulation using _winreg (added in Python 2.0) and
-            # sys.getwindowsversion() (added in Python 2.3)
-            import _winreg
-            GetVersionEx = sys.getwindowsversion
-            RegQueryValueEx = _winreg.QueryValueEx
-            RegOpenKeyEx = _winreg.OpenKeyEx
-            RegCloseKey = _winreg.CloseKey
-            HKEY_LOCAL_MACHINE = _winreg.HKEY_LOCAL_MACHINE
-            VER_PLATFORM_WIN32_WINDOWS = 1
-            VER_PLATFORM_WIN32_NT = 2
-            VER_NT_WORKSTATION = 1
-            VER_NT_SERVER = 3
-            REG_SZ = 1
+            if csd[:13] == 'Service Pack ':
+                csd = 'SP' + csd[13:]
 
-    # Find out the registry key and some general version infos
-    winver = GetVersionEx()
-    maj,min,buildno,plat,csd = winver
-    version = '%i.%i.%i' % (maj,min,buildno & 0xFFFF)
-    if hasattr(winver, "service_pack"):
-        if winver.service_pack != "":
-            csd = 'SP%s' % winver.service_pack_major
-    else:
-        if csd[:13] == 'Service Pack ':
-            csd = 'SP' + csd[13:]
+    # VER_NT_SERVER = 3
+    if getattr(winver, 'product_type', None) == 3:
+        release = (_WIN32_SERVER_RELEASES.get((maj, min)) or
+                   _WIN32_SERVER_RELEASES.get((maj, None)) or
+                   release)
 
-    if plat == VER_PLATFORM_WIN32_WINDOWS:
-        regkey = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion'
-        # Try to guess the release name
-        if maj == 4:
-            if min == 0:
-                release = '95'
-            elif min == 10:
-                release = '98'
-            elif min == 90:
-                release = 'Me'
-            else:
-                release = 'postMe'
-        elif maj == 5:
-            release = '2000'
-
-    elif plat == VER_PLATFORM_WIN32_NT:
-        regkey = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion'
-        if maj <= 4:
-            release = 'NT'
-        elif maj == 5:
-            if min == 0:
-                release = '2000'
-            elif min == 1:
-                release = 'XP'
-            elif min == 2:
-                release = '2003Server'
-            else:
-                release = 'post2003'
-        elif maj == 6:
-            if hasattr(winver, "product_type"):
-                product_type = winver.product_type
-            else:
-                product_type = VER_NT_WORKSTATION
-                # Without an OSVERSIONINFOEX capable sys.getwindowsversion(),
-                # or help from the registry, we cannot properly identify
-                # non-workstation versions.
-                try:
-                    key = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
-                    name, type = RegQueryValueEx(key, "ProductName")
-                    # Discard any type that isn't REG_SZ
-                    if type == REG_SZ and name.find("Server") != -1:
-                        product_type = VER_NT_SERVER
-                except WindowsError:
-                    # Use default of VER_NT_WORKSTATION
-                    pass
-
-            if min == 0:
-                if product_type == VER_NT_WORKSTATION:
-                    release = 'Vista'
-                else:
-                    release = '2008Server'
-            elif min == 1:
-                if product_type == VER_NT_WORKSTATION:
-                    release = '7'
-                else:
-                    release = '2008ServerR2'
-            elif min == 2:
-                if product_type == VER_NT_WORKSTATION:
-                    release = '8'
-                else:
-                    release = '2012Server'
-            else:
-                release = 'post2012Server'
-
-    else:
-        if not release:
-            # E.g. Win3.1 with win32s
-            release = '%i.%i' % (maj,min)
-        return release,version,csd,ptype
-
-    # Open the registry key
+    key = None
     try:
-        keyCurVer = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
-        # Get a value to make sure the key exists...
-        RegQueryValueEx(keyCurVer, 'SystemRoot')
+        key = OpenKeyEx(HKEY_LOCAL_MACHINE,
+                        r'SOFTWARE\Microsoft\Windows NT\CurrentVersion')
+        ptype = QueryValueEx(key, 'CurrentType')[0]
     except:
-        return release,version,csd,ptype
+        pass
+    finally:
+        if key:
+            CloseKey(key)
 
-    # Parse values
-    #subversion = _win32_getvalue(keyCurVer,
-    #                            'SubVersionNumber',
-    #                            ('',1))[0]
-    #if subversion:
-    #   release = release + subversion # 95a, 95b, etc.
-    build = _win32_getvalue(keyCurVer,
-                            'CurrentBuildNumber',
-                            ('',1))[0]
-    ptype = _win32_getvalue(keyCurVer,
-                           'CurrentType',
-                           (ptype,1))[0]
-
-    # Normalize version
-    version = _norm_version(version,build)
-
-    # Close key
-    RegCloseKey(keyCurVer)
-    return release,version,csd,ptype
+    return release, version, csd, ptype
 
 def _mac_ver_lookup(selectors,default=None):
 
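Note on the platform.py hunk above: the rewritten win32_ver() maps the real (major, minor) version onto a release name through the _WIN32_CLIENT_RELEASES/_WIN32_SERVER_RELEASES tables, and reads the OS type string out of the registry instead of guessing it. A minimal sketch of that registry lookup, assuming a Windows host with the stock Python 2.7 _winreg module (read_current_type is an illustrative helper, not part of the prebuilts):

    # Sketch: read the same registry value the new win32_ver() uses for ptype.
    from _winreg import (OpenKeyEx, QueryValueEx, CloseKey,
                         HKEY_LOCAL_MACHINE)

    def read_current_type(default=''):
        key = None
        try:
            key = OpenKeyEx(HKEY_LOCAL_MACHINE,
                            r'SOFTWARE\Microsoft\Windows NT\CurrentVersion')
            return QueryValueEx(key, 'CurrentType')[0]
        except WindowsError:
            return default  # key or value missing: keep the default
        finally:
            if key:
                CloseKey(key)

On a Windows 7 SP1 machine, for example, platform.win32_ver() is then expected to return a tuple along the lines of ('7', '6.1.7601', 'SP1', 'Multiprocessor Free').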
diff --git a/lib/python2.7/pydoc.py b/lib/python2.7/pydoc.py
index d7396f0..9316fff 100755
--- a/lib/python2.7/pydoc.py
+++ b/lib/python2.7/pydoc.py
@@ -2244,8 +2244,11 @@
             if self.scanner:
                 self.scanner.quit = 1
             self.scanner = ModuleScanner()
+            def onerror(modname):
+                pass
             threading.Thread(target=self.scanner.run,
-                             args=(self.update, key, self.done)).start()
+                             args=(self.update, key, self.done),
+                             kwargs=dict(onerror=onerror)).start()
 
         def update(self, path, modname, desc):
             if modname[-9:] == '.__init__':
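Note on the pydoc.py hunk above: the GUI search thread now hands an onerror callback to ModuleScanner.run(), so modules that fail to import are skipped instead of killing the scan. A minimal standalone sketch of the same pattern (Python 2.7; on_found and on_error are illustrative names):

    # Sketch: scan installed modules, ignoring import errors, the way the
    # patched pydoc GUI search does via its onerror callback.
    import pydoc

    def on_found(path, modname, desc):
        print('%s - %s' % (modname, desc or '(no description)'))

    def on_error(modname):
        pass  # swallow modules that fail to import

    scanner = pydoc.ModuleScanner()
    scanner.run(on_found, key='json', onerror=on_error)

ModuleScanner.run() forwards onerror to pkgutil.walk_packages(), which is where a broken package would otherwise raise while the index is being built.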
diff --git a/lib/python2.7/pydoc_data/topics.py b/lib/python2.7/pydoc_data/topics.py
index 30dc44b..797348d 100644
--- a/lib/python2.7/pydoc_data/topics.py
+++ b/lib/python2.7/pydoc_data/topics.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Autogenerated by Sphinx on Sun May 10 13:12:18 2015
+# Autogenerated by Sphinx on Wed Jan  6 03:48:54 2016
 topics = {'assert': u'\nThe "assert" statement\n**********************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n   assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, "assert expression", is equivalent to\n\n   if __debug__:\n      if not expression: raise AssertionError\n\nThe extended form, "assert expression1, expression2", is equivalent to\n\n   if __debug__:\n      if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that "__debug__" and "AssertionError" refer\nto the built-in variables with those names.  In the current\nimplementation, the built-in variable "__debug__" is "True" under\nnormal circumstances, "False" when optimization is requested (command\nline option -O).  The current code generator emits no code for an\nassert statement when optimization is requested at compile time.  Note\nthat it is unnecessary to include the source code for the expression\nthat failed in the error message; it will be displayed as part of the\nstack trace.\n\nAssignments to "__debug__" are illegal.  The value for the built-in\nvariable is determined when the interpreter starts.\n',
  'assignment': u'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n   assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n   target_list     ::= target ("," target)* [","]\n   target          ::= identifier\n              | "(" target_list ")"\n              | "[" target_list "]"\n              | attributeref\n              | subscription\n              | slicing\n\n(See section Primaries for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable.  The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section The standard type\nhierarchy).\n\nAssignment of an object to a target list is recursively defined as\nfollows.\n\n* If the target list is a single target: The object is assigned to\n  that target.\n\n* If the target list is a comma-separated list of targets: The\n  object must be an iterable with the same number of items as there\n  are targets in the target list, and the items are assigned, from\n  left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n  * If the name does not occur in a "global" statement in the\n    current code block: the name is bound to the object in the current\n    local namespace.\n\n  * Otherwise: the name is bound to the object in the current global\n    namespace.\n\n  The name is rebound if it was already bound.  This may cause the\n  reference count for the object previously bound to the name to reach\n  zero, causing the object to be deallocated and its destructor (if it\n  has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in\n  square brackets: The object must be an iterable with the same number\n  of items as there are targets in the target list, and its items are\n  assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n  the reference is evaluated.  It should yield an object with\n  assignable attributes; if this is not the case, "TypeError" is\n  raised.  That object is then asked to assign the assigned object to\n  the given attribute; if it cannot perform the assignment, it raises\n  an exception (usually but not necessarily "AttributeError").\n\n  Note: If the object is a class instance and the attribute reference\n  occurs on both sides of the assignment operator, the RHS expression,\n  "a.x" can access either an instance attribute or (if no instance\n  attribute exists) a class attribute.  The LHS target "a.x" is always\n  set as an instance attribute, creating it if necessary.  
Thus, the\n  two occurrences of "a.x" do not necessarily refer to the same\n  attribute: if the RHS expression refers to a class attribute, the\n  LHS creates a new instance attribute as the target of the\n  assignment:\n\n     class Cls:\n         x = 3             # class variable\n     inst = Cls()\n     inst.x = inst.x + 1   # writes inst.x as 4 leaving Cls.x as 3\n\n  This description does not necessarily apply to descriptor\n  attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n  reference is evaluated.  It should yield either a mutable sequence\n  object (such as a list) or a mapping object (such as a dictionary).\n  Next, the subscript expression is evaluated.\n\n  If the primary is a mutable sequence object (such as a list), the\n  subscript must yield a plain integer.  If it is negative, the\n  sequence\'s length is added to it. The resulting value must be a\n  nonnegative integer less than the sequence\'s length, and the\n  sequence is asked to assign the assigned object to its item with\n  that index.  If the index is out of range, "IndexError" is raised\n  (assignment to a subscripted sequence cannot add new items to a\n  list).\n\n  If the primary is a mapping object (such as a dictionary), the\n  subscript must have a type compatible with the mapping\'s key type,\n  and the mapping is then asked to create a key/datum pair which maps\n  the subscript to the assigned object.  This can either replace an\n  existing key/value pair with the same key value, or insert a new\n  key/value pair (if no key with the same value existed).\n\n* If the target is a slicing: The primary expression in the\n  reference is evaluated.  It should yield a mutable sequence object\n  (such as a list).  The assigned object should be a sequence object\n  of the same type.  Next, the lower and upper bound expressions are\n  evaluated, insofar they are present; defaults are zero and the\n  sequence\'s length.  The bounds should evaluate to (small) integers.\n  If either bound is negative, the sequence\'s length is added to it.\n  The resulting bounds are clipped to lie between zero and the\n  sequence\'s length, inclusive.  Finally, the sequence object is asked\n  to replace the slice with the items of the assigned sequence.  The\n  length of the slice may be different from the length of the assigned\n  sequence, thus changing the length of the target sequence, if the\n  object allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe!  
For instance, the\nfollowing program prints "[0, 2]":\n\n   x = [0, 1]\n   i = 0\n   i, x[i] = 1, 2\n   print x\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n   augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n   augtarget                 ::= identifier | attributeref | subscription | slicing\n   augop                     ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n             | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section Primaries for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same caveat about\nclass and instance attributes applies as for regular assignments.\n',
  'atom-identifiers': u'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name.  See section Identifiers\nand keywords for lexical definition and section Naming and binding for\ndocumentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a "NameError" exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them.  The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name.  For example, the identifier "__spam"\noccurring in a class named "Ham" will be transformed to "_Ham__spam".\nThis transformation is independent of the syntactical context in which\nthe identifier is used.  If the transformed name is extremely long\n(longer than 255 characters), implementation defined truncation may\nhappen. If the class name consists only of underscores, no\ntransformation is done.\n',
@@ -11,6 +11,7 @@
  'bitwise': u'\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n   and_expr ::= shift_expr | and_expr "&" shift_expr\n   xor_expr ::= and_expr | xor_expr "^" and_expr\n   or_expr  ::= xor_expr | or_expr "|" xor_expr\n\nThe "&" operator yields the bitwise AND of its arguments, which must\nbe plain or long integers.  The arguments are converted to a common\ntype.\n\nThe "^" operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be plain or long integers.  The arguments are\nconverted to a common type.\n\nThe "|" operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be plain or long integers.  The arguments are converted to\na common type.\n',
  'bltin-code-objects': u'\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment.  Code objects are returned by the built-\nin "compile()" function and can be extracted from function objects\nthrough their "func_code" attribute. See also the "code" module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the "exec" statement or the built-in "eval()"\nfunction.\n\nSee The standard type hierarchy for more information.\n',
  'bltin-ellipsis-object': u'\nThe Ellipsis Object\n*******************\n\nThis object is used by extended slice notation (see Slicings).  It\nsupports no special operations.  There is exactly one ellipsis object,\nnamed "Ellipsis" (a built-in name).\n\nIt is written as "Ellipsis".  When in a subscript, it can also be\nwritten as "...", for example "seq[...]".\n',
+ 'bltin-file-objects': u'\nFile Objects\n************\n\nFile objects are implemented using C\'s "stdio" package and can be\ncreated with the built-in "open()" function.  File objects are also\nreturned by some other built-in functions and methods, such as\n"os.popen()" and "os.fdopen()" and the "makefile()" method of socket\nobjects. Temporary files can be created using the "tempfile" module,\nand high-level file operations such as copying, moving, and deleting\nfiles and directories can be achieved with the "shutil" module.\n\nWhen a file operation fails for an I/O-related reason, the exception\n"IOError" is raised.  This includes situations where the operation is\nnot defined for some reason, like "seek()" on a tty device or writing\na file opened for reading.\n\nFiles have the following methods:\n\nfile.close()\n\n   Close the file.  A closed file cannot be read or written any more.\n   Any operation which requires that the file be open will raise a\n   "ValueError" after the file has been closed.  Calling "close()"\n   more than once is allowed.\n\n   As of Python 2.5, you can avoid having to call this method\n   explicitly if you use the "with" statement.  For example, the\n   following code will automatically close *f* when the "with" block\n   is exited:\n\n      from __future__ import with_statement # This isn\'t required in Python 2.6\n\n      with open("hello.txt") as f:\n          for line in f:\n              print line,\n\n   In older versions of Python, you would have needed to do this to\n   get the same effect:\n\n      f = open("hello.txt")\n      try:\n          for line in f:\n              print line,\n      finally:\n          f.close()\n\n   Note: Not all "file-like" types in Python support use as a\n     context manager for the "with" statement.  If your code is\n     intended to work with any file-like object, you can use the\n     function "contextlib.closing()" instead of using the object\n     directly.\n\nfile.flush()\n\n   Flush the internal buffer, like "stdio"\'s "fflush()".  This may be\n   a no-op on some file-like objects.\n\n   Note: "flush()" does not necessarily write the file\'s data to\n     disk. Use "flush()" followed by "os.fsync()" to ensure this\n     behavior.\n\nfile.fileno()\n\n   Return the integer "file descriptor" that is used by the underlying\n   implementation to request I/O operations from the operating system.\n   This can be useful for other, lower level interfaces that use file\n   descriptors, such as the "fcntl" module or "os.read()" and friends.\n\n   Note: File-like objects which do not have a real file descriptor\n     should *not* provide this method!\n\nfile.isatty()\n\n   Return "True" if the file is connected to a tty(-like) device, else\n   "False".\n\n   Note: If a file-like object is not associated with a real file,\n     this method should *not* be implemented.\n\nfile.next()\n\n   A file object is its own iterator, for example "iter(f)" returns\n   *f* (unless *f* is closed).  When a file is used as an iterator,\n   typically in a "for" loop (for example, "for line in f: print\n   line.strip()"), the "next()" method is called repeatedly.  This\n   method returns the next input line, or raises "StopIteration" when\n   EOF is hit when the file is open for reading (behavior is undefined\n   when the file is open for writing).  In order to make a "for" loop\n   the most efficient way of looping over the lines of a file (a very\n   common operation), the "next()" method uses a hidden read-ahead\n   buffer.  
As a consequence of using a read-ahead buffer, combining\n   "next()" with other file methods (like "readline()") does not work\n   right.  However, using "seek()" to reposition the file to an\n   absolute position will flush the read-ahead buffer.\n\n   New in version 2.3.\n\nfile.read([size])\n\n   Read at most *size* bytes from the file (less if the read hits EOF\n   before obtaining *size* bytes).  If the *size* argument is negative\n   or omitted, read all data until EOF is reached.  The bytes are\n   returned as a string object.  An empty string is returned when EOF\n   is encountered immediately.  (For certain files, like ttys, it\n   makes sense to continue reading after an EOF is hit.)  Note that\n   this method may call the underlying C function "fread()" more than\n   once in an effort to acquire as close to *size* bytes as possible.\n   Also note that when in non-blocking mode, less data than was\n   requested may be returned, even if no *size* parameter was given.\n\n   Note: This function is simply a wrapper for the underlying\n     "fread()" C function, and will behave the same in corner cases,\n     such as whether the EOF value is cached.\n\nfile.readline([size])\n\n   Read one entire line from the file.  A trailing newline character\n   is kept in the string (but may be absent when a file ends with an\n   incomplete line). [6] If the *size* argument is present and non-\n   negative, it is a maximum byte count (including the trailing\n   newline) and an incomplete line may be returned. When *size* is not\n   0, an empty string is returned *only* when EOF is encountered\n   immediately.\n\n   Note: Unlike "stdio"\'s "fgets()", the returned string contains\n     null characters ("\'\\0\'") if they occurred in the input.\n\nfile.readlines([sizehint])\n\n   Read until EOF using "readline()" and return a list containing the\n   lines thus read.  If the optional *sizehint* argument is present,\n   instead of reading up to EOF, whole lines totalling approximately\n   *sizehint* bytes (possibly after rounding up to an internal buffer\n   size) are read.  Objects implementing a file-like interface may\n   choose to ignore *sizehint* if it cannot be implemented, or cannot\n   be implemented efficiently.\n\nfile.xreadlines()\n\n   This method returns the same thing as "iter(f)".\n\n   New in version 2.1.\n\n   Deprecated since version 2.3: Use "for line in file" instead.\n\nfile.seek(offset[, whence])\n\n   Set the file\'s current position, like "stdio"\'s "fseek()". The\n   *whence* argument is optional and defaults to  "os.SEEK_SET" or "0"\n   (absolute file positioning); other values are "os.SEEK_CUR" or "1"\n   (seek relative to the current position) and "os.SEEK_END" or "2"\n   (seek relative to the file\'s end).  There is no return value.\n\n   For example, "f.seek(2, os.SEEK_CUR)" advances the position by two\n   and "f.seek(-3, os.SEEK_END)" sets the position to the third to\n   last.\n\n   Note that if the file is opened for appending (mode "\'a\'" or\n   "\'a+\'"), any "seek()" operations will be undone at the next write.\n   If the file is only opened for writing in append mode (mode "\'a\'"),\n   this method is essentially a no-op, but it remains useful for files\n   opened in append mode with reading enabled (mode "\'a+\'").  If the\n   file is opened in text mode (without "\'b\'"), only offsets returned\n   by "tell()" are legal.  
Use of other offsets causes undefined\n   behavior.\n\n   Note that not all file objects are seekable.\n\n   Changed in version 2.6: Passing float values as offset has been\n   deprecated.\n\nfile.tell()\n\n   Return the file\'s current position, like "stdio"\'s "ftell()".\n\n   Note: On Windows, "tell()" can return illegal values (after an\n     "fgets()") when reading files with Unix-style line-endings. Use\n     binary mode ("\'rb\'") to circumvent this problem.\n\nfile.truncate([size])\n\n   Truncate the file\'s size.  If the optional *size* argument is\n   present, the file is truncated to (at most) that size.  The size\n   defaults to the current position. The current file position is not\n   changed.  Note that if a specified size exceeds the file\'s current\n   size, the result is platform-dependent:  possibilities include that\n   the file may remain unchanged, increase to the specified size as if\n   zero-filled, or increase to the specified size with undefined new\n   content. Availability:  Windows, many Unix variants.\n\nfile.write(str)\n\n   Write a string to the file.  There is no return value.  Due to\n   buffering, the string may not actually show up in the file until\n   the "flush()" or "close()" method is called.\n\nfile.writelines(sequence)\n\n   Write a sequence of strings to the file.  The sequence can be any\n   iterable object producing strings, typically a list of strings.\n   There is no return value. (The name is intended to match\n   "readlines()"; "writelines()" does not add line separators.)\n\nFiles support the iterator protocol.  Each iteration returns the same\nresult as "readline()", and iteration ends when the "readline()"\nmethod returns an empty string.\n\nFile objects also offer a number of other interesting attributes.\nThese are not required for file-like objects, but should be\nimplemented if they make sense for the particular object.\n\nfile.closed\n\n   bool indicating the current state of the file object.  This is a\n   read-only attribute; the "close()" method changes the value. It may\n   not be available on all file-like objects.\n\nfile.encoding\n\n   The encoding that this file uses. When Unicode strings are written\n   to a file, they will be converted to byte strings using this\n   encoding. In addition, when the file is connected to a terminal,\n   the attribute gives the encoding that the terminal is likely to use\n   (that  information might be incorrect if the user has misconfigured\n   the  terminal). The attribute is read-only and may not be present\n   on all file-like objects. It may also be "None", in which case the\n   file uses the system default encoding for converting Unicode\n   strings.\n\n   New in version 2.3.\n\nfile.errors\n\n   The Unicode error handler used along with the encoding.\n\n   New in version 2.6.\n\nfile.mode\n\n   The I/O mode for the file.  If the file was created using the\n   "open()" built-in function, this will be the value of the *mode*\n   parameter.  This is a read-only attribute and may not be present on\n   all file-like objects.\n\nfile.name\n\n   If the file object was created using "open()", the name of the\n   file. Otherwise, some string that indicates the source of the file\n   object, of the form "<...>".  
This is a read-only attribute and may\n   not be present on all file-like objects.\n\nfile.newlines\n\n   If Python was built with *universal newlines* enabled (the default)\n   this read-only attribute exists, and for files opened in universal\n   newline read mode it keeps track of the types of newlines\n   encountered while reading the file. The values it can take are\n   "\'\\r\'", "\'\\n\'", "\'\\r\\n\'", "None" (unknown, no newlines read yet) or\n   a tuple containing all the newline types seen, to indicate that\n   multiple newline conventions were encountered. For files not opened\n   in universal newlines read mode the value of this attribute will be\n   "None".\n\nfile.softspace\n\n   Boolean that indicates whether a space character needs to be\n   printed before another value when using the "print" statement.\n   Classes that are trying to simulate a file object should also have\n   a writable "softspace" attribute, which should be initialized to\n   zero.  This will be automatic for most classes implemented in\n   Python (care may be needed for objects that override attribute\n   access); types implemented in C will have to provide a writable\n   "softspace" attribute.\n\n   Note: This attribute is not used to control the "print"\n     statement, but to allow the implementation of "print" to keep\n     track of its internal state.\n',
  'bltin-null-object': u'\nThe Null Object\n***************\n\nThis object is returned by functions that don\'t explicitly return a\nvalue.  It supports no special operations.  There is exactly one null\nobject, named "None" (a built-in name).\n\nIt is written as "None".\n',
  'bltin-type-objects': u'\nType Objects\n************\n\nType objects represent the various object types.  An object\'s type is\naccessed by the built-in function "type()".  There are no special\noperations on types.  The standard module "types" defines names for\nall standard built-in types.\n\nTypes are written like this: "<type \'int\'>".\n',
  'booleans': u'\nBoolean operations\n******************\n\n   or_test  ::= and_test | or_test "or" and_test\n   and_test ::= not_test | and_test "and" not_test\n   not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: "False", "None", numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets).  All other values are interpreted\nas true.  (See the "__nonzero__()" special method for a way to change\nthis.)\n\nThe operator "not" yields "True" if its argument is false, "False"\notherwise.\n\nThe expression "x and y" first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression "x or y" first evaluates *x*; if *x* is true, its value\nis returned; otherwise, *y* is evaluated and the resulting value is\nreturned.\n\n(Note that neither "and" nor "or" restrict the value and type they\nreturn to "False" and "True", but rather return the last evaluated\nargument. This is sometimes useful, e.g., if "s" is a string that\nshould be replaced by a default value if it is empty, the expression\n"s or \'foo\'" yields the desired value.  Because "not" has to invent a\nvalue anyway, it does not bother to return a value of the same type as\nits argument, so e.g., "not \'foo\'" yields "False", not "\'\'".)\n',
@@ -19,12 +20,12 @@
  'calls': u'\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n   call                 ::= primary "(" [argument_list [","]\n            | expression genexpr_for] ")"\n   argument_list        ::= positional_arguments ["," keyword_arguments]\n                       ["," "*" expression] ["," keyword_arguments]\n                       ["," "**" expression]\n                     | keyword_arguments ["," "*" expression]\n                       ["," "**" expression]\n                     | "*" expression ["," keyword_arguments] ["," "**" expression]\n                     | "**" expression\n   positional_arguments ::= expression ("," expression)*\n   keyword_arguments    ::= keyword_item ("," keyword_item)*\n   keyword_item         ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and certain class instances\nthemselves are callable; extensions may define additional callable\nobject types).  All argument expressions are evaluated before the call\nis attempted.  Please refer to section Function definitions for the\nsyntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows.  First, a list of unfilled slots is\ncreated for the formal parameters.  If there are N positional\narguments, they are placed in the first N slots.  Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on).  If the slot is\nalready filled, a "TypeError" exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is "None", it fills the slot).  When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition.  (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.)  If there are any\nunfilled slots for which no default value is specified, a "TypeError"\nexception is raised.  Otherwise, the list of filled slots is used as\nthe argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword.  
In CPython, this is the case\nfor functions implemented in C that use "PyArg_ParseTuple()" to parse\ntheir arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "*identifier" is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "**identifier" is present; in this case, that formal\nparameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax "*expression" appears in the function call, "expression"\nmust evaluate to an iterable.  Elements from this iterable are treated\nas if they were additional positional arguments; if there are\npositional arguments *x1*, ..., *xN*, and "expression" evaluates to a\nsequence *y1*, ..., *yM*, this is equivalent to a call with M+N\npositional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the "*expression" syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the "**expression" argument, if any -- see\nbelow).  So:\n\n   >>> def f(a, b):\n   ...  print a, b\n   ...\n   >>> f(b=1, *(2,))\n   2 1\n   >>> f(a=1, *(2,))\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in ?\n   TypeError: f() got multiple values for keyword argument \'a\'\n   >>> f(1, *(2,))\n   1 2\n\nIt is unusual for both keyword arguments and the "*expression" syntax\nto be used in the same call, so in practice this confusion does not\narise.\n\nIf the syntax "**expression" appears in the function call,\n"expression" must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments.  In the case of a keyword\nappearing in both "expression" and as an explicit keyword argument, a\n"TypeError" exception is raised.\n\nFormal parameters using the syntax "*identifier" or "**identifier"\ncannot be used as positional argument slots or as keyword argument\nnames.  Formal parameters using the syntax "(sublist)" cannot be used\nas keyword argument names; the outermost sublist corresponds to a\nsingle unnamed argument slot, and the argument value is assigned to\nthe sublist using the usual tuple assignment rules after all other\nparameter processing is done.\n\nA call always returns some value, possibly "None", unless it raises an\nexception.  How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n   The code block for the function is executed, passing it the\n   argument list.  The first thing the code block will do is bind the\n   formal parameters to the arguments; this is described in section\n   Function definitions.  
When the code block executes a "return"\n   statement, this specifies the return value of the function call.\n\na built-in function or method:\n   The result is up to the interpreter; see Built-in Functions for the\n   descriptions of built-in functions and methods.\n\na class object:\n   A new instance of that class is returned.\n\na class instance method:\n   The corresponding user-defined function is called, with an argument\n   list that is one longer than the argument list of the call: the\n   instance becomes the first argument.\n\na class instance:\n   The class must define a "__call__()" method; the effect is then the\n   same as if that method was called.\n',
  'class': u'\nClass definitions\n*****************\n\nA class definition defines a class object (see section The standard\ntype hierarchy):\n\n   classdef    ::= "class" classname [inheritance] ":" suite\n   inheritance ::= "(" [expression_list] ")"\n   classname   ::= identifier\n\nA class definition is an executable statement.  It first evaluates the\ninheritance list, if present.  Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing.  The class\'s suite is then executed in a new execution\nframe (see section Naming and binding), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.)  When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary.  The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances.  To create instance\nvariables, they can be set in a method with "self.name = value".  Both\nclass and instance variables are accessible through the notation\n""self.name"", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results.  For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions.  The evaluation rules for the decorator\nexpressions are the same as for functions.  The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n    there is a "finally" clause which happens to raise another\n    exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n    an exception or the execution of a "return", "continue", or\n    "break" statement.\n\n[3] A string literal appearing as the first statement in the\n    function body is transformed into the function\'s "__doc__"\n    attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n    body is transformed into the namespace\'s "__doc__" item and\n    therefore the class\'s *docstring*.\n',
  'comparisons': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation.  Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n   comparison    ::= or_expr ( comp_operator or_expr )*\n   comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n                     | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe forms "<>" and "!=" are equivalent; for consistency with C, "!="\nis preferred; where "!=" is mentioned below "<>" is also accepted.\nThe "<>" spelling is considered obsolescent.\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects.  The objects need not have the same type. If both are\nnumbers, they are converted to a common type.  Otherwise, objects of\ndifferent types *always* compare unequal, and are ordered consistently\nbut arbitrarily. You can control comparison behavior of objects of\nnon-built-in types by defining a "__cmp__" method or rich comparison\nmethods like "__gt__", described in section Special method names.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the "in" and "not in"\noperators. In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric\n  equivalents (the result of the built-in function "ord()") of their\n  characters. Unicode and 8-bit strings are fully interoperable in\n  this behavior. [4]\n\n* Tuples and lists are compared lexicographically using comparison\n  of corresponding elements.  This means that to compare equal, each\n  element must compare equal and the two sequences must be of the same\n  type and have the same length.\n\n  If not equal, the sequences are ordered the same as their first\n  differing elements.  For example, "cmp([1,2,x], [1,2,y])" returns\n  the same as "cmp(x,y)".  If the corresponding element does not\n  exist, the shorter sequence is ordered first (for example, "[1,2] <\n  [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n  (key, value) lists compare equal. [5] Outcomes other than equality\n  are resolved consistently, but are not otherwise defined. [6]\n\n* Most other objects of built-in types compare unequal unless they\n  are the same object; the choice whether one object is considered\n  smaller or larger than another one is made arbitrarily but\n  consistently within one execution of a program.\n\nThe operators "in" and "not in" test for collection membership.  
"x in\ns" evaluates to true if *x* is a member of the collection *s*, and\nfalse otherwise.  "x not in s" returns the negation of "x in s". The\ncollection membership test has traditionally been bound to sequences;\nan object is a member of a collection if the collection is a sequence\nand contains an element equal to that object.  However, it make sense\nfor many other object types to support membership tests without being\na sequence.  In particular, dictionaries (for keys) and sets support\nmembership testing.\n\nFor the list and tuple types, "x in y" is true if and only if there\nexists an index *i* such that "x == y[i]" is true.\n\nFor the Unicode and string types, "x in y" is true if and only if *x*\nis a substring of *y*.  An equivalent test is "y.find(x) != -1".\nNote, *x* and *y* need not be the same type; consequently, "u\'ab\' in\n\'abc\'" will return "True". Empty strings are always considered to be a\nsubstring of any other string, so """ in "abc"" will return "True".\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength "1".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y".  If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object.  "x is not y"\nyields the inverse truth value. [7]\n',
- 'compound': u'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way.  In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe "if", "while" and "for" statements implement traditional control\nflow constructs.  "try" specifies exception handlers and/or cleanup\ncode for a group of statements.  Function and class definitions are\nalso syntactically compound statements.\n\nCompound statements consist of one or more \'clauses.\'  A clause\nconsists of a header and a \'suite.\'  The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon.  A suite is a group of statements controlled by a\nclause.  A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines.  Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which "if" clause a following "else" clause would belong:\n\n   if test1: if test2: print x\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n"print" statements are executed:\n\n   if x < y < z: print x; print y; print z\n\nSummarizing:\n\n   compound_stmt ::= if_stmt\n                     | while_stmt\n                     | for_stmt\n                     | try_stmt\n                     | with_stmt\n                     | funcdef\n                     | classdef\n                     | decorated\n   suite         ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n   statement     ::= stmt_list NEWLINE | compound_stmt\n   stmt_list     ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a "NEWLINE" possibly followed by a\n"DEDENT". 
Also note that optional continuation clauses always begin\nwith a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling "else"\' problem is solved in Python by\nrequiring nested "if" statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe "if" statement\n==================\n\nThe "if" statement is used for conditional execution:\n\n   if_stmt ::= "if" expression ":" suite\n               ( "elif" expression ":" suite )*\n               ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section Boolean operations\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n\n\nThe "while" statement\n=====================\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n   while_stmt ::= "while" expression ":" suite\n                  ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite.  A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n\n\nThe "for" statement\n===================\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n   for_stmt ::= "for" target_list "in" expression_list ":" suite\n                ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject.  An iterator is created for the result of the\n"expression_list".  The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices.  Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed.  When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite.  A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop.  Hint: the built-in function "range()" returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s "for i := a to b\ndo"; e.g., "range(3)" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n  loop (this can only occur for mutable sequences, i.e. lists). An\n  internal counter is used to keep track of which item is used next,\n  and this is incremented on each iteration.  
When this counter has\n  reached the length of the sequence the loop terminates.  This means\n  that if the suite deletes the current (or a previous) item from the\n  sequence, the next item will be skipped (since it gets the index of\n  the current item which has already been treated).  Likewise, if the\n  suite inserts an item in the sequence before the current item, the\n  current item will be treated again the next time through the loop.\n  This can lead to nasty bugs that can be avoided by making a\n  temporary copy using a slice of the whole sequence, e.g.,\n\n     for x in a[:]:\n         if x < 0: a.remove(x)\n\n\nThe "try" statement\n===================\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n   try_stmt  ::= try1_stmt | try2_stmt\n   try1_stmt ::= "try" ":" suite\n                 ("except" [expression [("as" | ",") identifier]] ":" suite)+\n                 ["else" ":" suite]\n                 ["finally" ":" suite]\n   try2_stmt ::= "try" ":" suite\n                 "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n"try"..."except"..."finally" did not work. "try"..."except" had to be\nnested in "try"..."finally".\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started.  This search inspects the except clauses\nin turn until one is found that matches the exception.  An expression-\nless except clause, if present, must be last; it matches any\nexception.  For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception.  An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject, or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed.  All except clauses must have an\nexecutable block.  When the end of this block is reached, execution\ncontinues normally after the entire try statement.  (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the "sys" module:\n"sys.exc_type" receives the object identifying the exception;\n"sys.exc_value" receives the exception\'s parameter;\n"sys.exc_traceback" receives a traceback object (see section The\nstandard type hierarchy) identifying the point in the program where\nthe exception occurred. These details are also available through the\n"sys.exc_info()" function, which returns a tuple "(exc_type,\nexc_value, exc_traceback)".  
Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program.  As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler.  The "try"\nclause is executed, including any "except" and "else" clauses.  If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed.  If\nthere is a saved exception, it is re-raised at the end of the\n"finally" clause. If the "finally" clause raises another exception or\nexecutes a "return" or "break" statement, the saved exception is\ndiscarded:\n\n   >>> def f():\n   ...     try:\n   ...         1/0\n   ...     finally:\n   ...         return 42\n   ...\n   >>> f()\n   42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed.  Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n   >>> def foo():\n   ...     try:\n   ...         return \'try\'\n   ...     finally:\n   ...         return \'finally\'\n   ...\n   >>> foo()\n   \'finally\'\n\nAdditional information on exceptions can be found in section\nExceptions, and information on using the "raise" statement to generate\nexceptions may be found in section The raise statement.\n\n\nThe "with" statement\n====================\n\nNew in version 2.5.\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section With Statement\nContext Managers). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n   with_stmt ::= "with" with_item ("," with_item)* ":" suite\n   with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n   is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n   value from "__enter__()" is assigned to it.\n\n   Note: The "with" statement guarantees that if the "__enter__()"\n     method returns without an error, then "__exit__()" will always be\n     called. Thus, if an error occurs during the assignment to the\n     target list, it will be treated the same as an error occurring\n     within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n   exception caused the suite to be exited, its type, value, and\n   traceback are passed as arguments to "__exit__()". 
Otherwise, three\n   "None" arguments are supplied.\n\n   If the suite was exited due to an exception, and the return value\n   from the "__exit__()" method was false, the exception is reraised.\n   If the return value was true, the exception is suppressed, and\n   execution continues with the statement following the "with"\n   statement.\n\n   If the suite was exited for any reason other than an exception, the\n   return value from "__exit__()" is ignored, and execution proceeds\n   at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n   with A() as a, B() as b:\n       suite\n\nis equivalent to\n\n   with A() as a:\n       with B() as b:\n           suite\n\nNote: In Python 2.5, the "with" statement is only allowed when the\n  "with_statement" feature has been enabled.  It is always enabled in\n  Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n     The specification, background, and examples for the Python "with"\n     statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection The standard type hierarchy):\n\n   decorated      ::= decorators (classdef | funcdef)\n   decorators     ::= decorator+\n   decorator      ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n   funcdef        ::= "def" funcname "(" [parameter_list] ")" ":" suite\n   dotted_name    ::= identifier ("." identifier)*\n   parameter_list ::= (defparameter ",")*\n                      (  "*" identifier ["," "**" identifier]\n                      | "**" identifier\n                      | defparameter [","] )\n   defparameter   ::= parameter ["=" expression]\n   sublist        ::= parameter ("," parameter)* [","]\n   parameter      ::= identifier | "(" sublist ")"\n   funcname       ::= identifier\n\nA function definition is an executable statement.  Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function).  This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition.  The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object.  Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n   @f1(arg)\n   @f2\n   def func(): pass\n\nis equivalent to:\n\n   def func(): pass\n   func = f1(arg)(f2(func))\n\nWhen one or more top-level *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted.  
If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.**  This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call.  This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended.  A way around this  is to use "None" as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n   def whats_on_the_telly(penguin=None):\n       if penguin is None:\n           penguin = []\n       penguin.append("property of the zoo")\n       return penguin\n\nFunction call semantics are described in more detail in section Calls.\nA function call always assigns values to all parameters mentioned in\nthe parameter list, either from position arguments, from keyword\narguments, or from default values.  If the form ""*identifier"" is\npresent, it is initialized to a tuple receiving any excess positional\nparameters, defaulting to the empty tuple.  If the form\n""**identifier"" is present, it is initialized to a new dictionary\nreceiving any excess keyword arguments, defaulting to a new empty\ndictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions.  This uses lambda\nexpressions, described in section Lambdas.  Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression.  The ""def"" form is actually more powerful since it\nallows the execution of multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects.  A ""def""\nform executed inside a function definition defines a local function\nthat can be returned or passed around.  Free variables used in the\nnested function can access the local variables of the function\ncontaining the def.  See section Naming and binding for details.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section The standard\ntype hierarchy):\n\n   classdef    ::= "class" classname [inheritance] ":" suite\n   inheritance ::= "(" [expression_list] ")"\n   classname   ::= identifier\n\nA class definition is an executable statement.  It first evaluates the\ninheritance list, if present.  Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing.  The class\'s suite is then executed in a new execution\nframe (see section Naming and binding), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.)  When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary.  
The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances.  To create instance\nvariables, they can be set in a method with "self.name = value".  Both\nclass and instance variables are accessible through the notation\n""self.name"", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results.  For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions.  The evaluation rules for the decorator\nexpressions are the same as for functions.  The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n    there is a "finally" clause which happens to raise another\n    exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n    an exception or the execution of a "return", "continue", or\n    "break" statement.\n\n[3] A string literal appearing as the first statement in the\n    function body is transformed into the function\'s "__doc__"\n    attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n    body is transformed into the namespace\'s "__doc__" item and\n    therefore the class\'s *docstring*.\n',
- 'context-managers': u'\nWith Statement Context Managers\n*******************************\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code.  Context managers are normally\ninvoked using the "with" statement (described in section The with\nstatement), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see Context Manager Types.\n\nobject.__enter__(self)\n\n   Enter the runtime context related to this object. The "with"\n   statement will bind this method\'s return value to the target(s)\n   specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n   Exit the runtime context related to this object. The parameters\n   describe the exception that caused the context to be exited. If the\n   context was exited without an exception, all three arguments will\n   be "None".\n\n   If an exception is supplied, and the method wishes to suppress the\n   exception (i.e., prevent it from being propagated), it should\n   return a true value. Otherwise, the exception will be processed\n   normally upon exit from this method.\n\n   Note that "__exit__()" methods should not reraise the passed-in\n   exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n     The specification, background, and examples for the Python "with"\n     statement.\n',
+ 'compound': u'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way.  In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe "if", "while" and "for" statements implement traditional control\nflow constructs.  "try" specifies exception handlers and/or cleanup\ncode for a group of statements.  Function and class definitions are\nalso syntactically compound statements.\n\nCompound statements consist of one or more \'clauses.\'  A clause\nconsists of a header and a \'suite.\'  The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon.  A suite is a group of statements controlled by a\nclause.  A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines.  Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which "if" clause a following "else" clause would belong:\n\n   if test1: if test2: print x\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n"print" statements are executed:\n\n   if x < y < z: print x; print y; print z\n\nSummarizing:\n\n   compound_stmt ::= if_stmt\n                     | while_stmt\n                     | for_stmt\n                     | try_stmt\n                     | with_stmt\n                     | funcdef\n                     | classdef\n                     | decorated\n   suite         ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n   statement     ::= stmt_list NEWLINE | compound_stmt\n   stmt_list     ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a "NEWLINE" possibly followed by a\n"DEDENT". 
Also note that optional continuation clauses always begin\nwith a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling "else"\' problem is solved in Python by\nrequiring nested "if" statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe "if" statement\n==================\n\nThe "if" statement is used for conditional execution:\n\n   if_stmt ::= "if" expression ":" suite\n               ( "elif" expression ":" suite )*\n               ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section Boolean operations\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n\n\nThe "while" statement\n=====================\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n   while_stmt ::= "while" expression ":" suite\n                  ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite.  A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n\n\nThe "for" statement\n===================\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n   for_stmt ::= "for" target_list "in" expression_list ":" suite\n                ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject.  An iterator is created for the result of the\n"expression_list".  The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices.  Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed.  When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite.  A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop.  Hint: the built-in function "range()" returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s "for i := a to b\ndo"; e.g., "range(3)" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n  loop (this can only occur for mutable sequences, i.e. lists). An\n  internal counter is used to keep track of which item is used next,\n  and this is incremented on each iteration.  
When this counter has\n  reached the length of the sequence the loop terminates.  This means\n  that if the suite deletes the current (or a previous) item from the\n  sequence, the next item will be skipped (since it gets the index of\n  the current item which has already been treated).  Likewise, if the\n  suite inserts an item in the sequence before the current item, the\n  current item will be treated again the next time through the loop.\n  This can lead to nasty bugs that can be avoided by making a\n  temporary copy using a slice of the whole sequence, e.g.,\n\n     for x in a[:]:\n         if x < 0: a.remove(x)\n\n\nThe "try" statement\n===================\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n   try_stmt  ::= try1_stmt | try2_stmt\n   try1_stmt ::= "try" ":" suite\n                 ("except" [expression [("as" | ",") identifier]] ":" suite)+\n                 ["else" ":" suite]\n                 ["finally" ":" suite]\n   try2_stmt ::= "try" ":" suite\n                 "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n"try"..."except"..."finally" did not work. "try"..."except" had to be\nnested in "try"..."finally".\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started.  This search inspects the except clauses\nin turn until one is found that matches the exception.  An expression-\nless except clause, if present, must be last; it matches any\nexception.  For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception.  An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject, or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed.  All except clauses must have an\nexecutable block.  When the end of this block is reached, execution\ncontinues normally after the entire try statement.  (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the "sys" module:\n"sys.exc_type" receives the object identifying the exception;\n"sys.exc_value" receives the exception\'s parameter;\n"sys.exc_traceback" receives a traceback object (see section The\nstandard type hierarchy) identifying the point in the program where\nthe exception occurred. These details are also available through the\n"sys.exc_info()" function, which returns a tuple "(exc_type,\nexc_value, exc_traceback)".  
Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program.  As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler.  The "try"\nclause is executed, including any "except" and "else" clauses.  If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed.  If\nthere is a saved exception, it is re-raised at the end of the\n"finally" clause. If the "finally" clause raises another exception or\nexecutes a "return" or "break" statement, the saved exception is\ndiscarded:\n\n   >>> def f():\n   ...     try:\n   ...         1/0\n   ...     finally:\n   ...         return 42\n   ...\n   >>> f()\n   42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed.  Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n   >>> def foo():\n   ...     try:\n   ...         return \'try\'\n   ...     finally:\n   ...         return \'finally\'\n   ...\n   >>> foo()\n   \'finally\'\n\nAdditional information on exceptions can be found in section\nExceptions, and information on using the "raise" statement to generate\nexceptions may be found in section The raise statement.\n\n\nThe "with" statement\n====================\n\nNew in version 2.5.\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section With Statement\nContext Managers). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n   with_stmt ::= "with" with_item ("," with_item)* ":" suite\n   with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n   is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n   value from "__enter__()" is assigned to it.\n\n   Note: The "with" statement guarantees that if the "__enter__()"\n     method returns without an error, then "__exit__()" will always be\n     called. Thus, if an error occurs during the assignment to the\n     target list, it will be treated the same as an error occurring\n     within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n   exception caused the suite to be exited, its type, value, and\n   traceback are passed as arguments to "__exit__()". 
Otherwise, three\n   "None" arguments are supplied.\n\n   If the suite was exited due to an exception, and the return value\n   from the "__exit__()" method was false, the exception is reraised.\n   If the return value was true, the exception is suppressed, and\n   execution continues with the statement following the "with"\n   statement.\n\n   If the suite was exited for any reason other than an exception, the\n   return value from "__exit__()" is ignored, and execution proceeds\n   at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n   with A() as a, B() as b:\n       suite\n\nis equivalent to\n\n   with A() as a:\n       with B() as b:\n           suite\n\nNote: In Python 2.5, the "with" statement is only allowed when the\n  "with_statement" feature has been enabled.  It is always enabled in\n  Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n  **PEP 0343** - The "with" statement\n     The specification, background, and examples for the Python "with"\n     statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection The standard type hierarchy):\n\n   decorated      ::= decorators (classdef | funcdef)\n   decorators     ::= decorator+\n   decorator      ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n   funcdef        ::= "def" funcname "(" [parameter_list] ")" ":" suite\n   dotted_name    ::= identifier ("." identifier)*\n   parameter_list ::= (defparameter ",")*\n                      (  "*" identifier ["," "**" identifier]\n                      | "**" identifier\n                      | defparameter [","] )\n   defparameter   ::= parameter ["=" expression]\n   sublist        ::= parameter ("," parameter)* [","]\n   parameter      ::= identifier | "(" sublist ")"\n   funcname       ::= identifier\n\nA function definition is an executable statement.  Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function).  This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition.  The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object.  Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n   @f1(arg)\n   @f2\n   def func(): pass\n\nis equivalent to:\n\n   def func(): pass\n   func = f1(arg)(f2(func))\n\nWhen one or more top-level *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted.  
If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.**  This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call.  This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended.  A way around this  is to use "None" as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n   def whats_on_the_telly(penguin=None):\n       if penguin is None:\n           penguin = []\n       penguin.append("property of the zoo")\n       return penguin\n\nFunction call semantics are described in more detail in section Calls.\nA function call always assigns values to all parameters mentioned in\nthe parameter list, either from position arguments, from keyword\narguments, or from default values.  If the form ""*identifier"" is\npresent, it is initialized to a tuple receiving any excess positional\nparameters, defaulting to the empty tuple.  If the form\n""**identifier"" is present, it is initialized to a new dictionary\nreceiving any excess keyword arguments, defaulting to a new empty\ndictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions.  This uses lambda\nexpressions, described in section Lambdas.  Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression.  The ""def"" form is actually more powerful since it\nallows the execution of multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects.  A ""def""\nform executed inside a function definition defines a local function\nthat can be returned or passed around.  Free variables used in the\nnested function can access the local variables of the function\ncontaining the def.  See section Naming and binding for details.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section The standard\ntype hierarchy):\n\n   classdef    ::= "class" classname [inheritance] ":" suite\n   inheritance ::= "(" [expression_list] ")"\n   classname   ::= identifier\n\nA class definition is an executable statement.  It first evaluates the\ninheritance list, if present.  Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing.  The class\'s suite is then executed in a new execution\nframe (see section Naming and binding), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.)  When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary.  
The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances.  To create instance\nvariables, they can be set in a method with "self.name = value".  Both\nclass and instance variables are accessible through the notation\n""self.name"", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results.  For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions.  The evaluation rules for the decorator\nexpressions are the same as for functions.  The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n    there is a "finally" clause which happens to raise another\n    exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n    an exception or the execution of a "return", "continue", or\n    "break" statement.\n\n[3] A string literal appearing as the first statement in the\n    function body is transformed into the function\'s "__doc__"\n    attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n    body is transformed into the namespace\'s "__doc__" item and\n    therefore the class\'s *docstring*.\n',
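A minimal illustrative sketch of the "try"..."except"..."else"..."finally" ordering described in the entry above (the function and the division are arbitrary choices, not taken from the entry itself):

   def divide(a, b):
       try:
           result = a / b
       except ZeroDivisionError:
           print("division by zero")
       else:
           # Runs only when the "try" suite raised no exception.
           print("result is %r" % result)
       finally:
           # Runs on every path out of the statement.
           print("cleaning up")

   divide(4, 2)   # prints the result, then "cleaning up"
   divide(4, 0)   # prints "division by zero", then "cleaning up"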
+ 'context-managers': u'\nWith Statement Context Managers\n*******************************\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code.  Context managers are normally\ninvoked using the "with" statement (described in section The with\nstatement), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see Context Manager Types.\n\nobject.__enter__(self)\n\n   Enter the runtime context related to this object. The "with"\n   statement will bind this method\'s return value to the target(s)\n   specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n   Exit the runtime context related to this object. The parameters\n   describe the exception that caused the context to be exited. If the\n   context was exited without an exception, all three arguments will\n   be "None".\n\n   If an exception is supplied, and the method wishes to suppress the\n   exception (i.e., prevent it from being propagated), it should\n   return a true value. Otherwise, the exception will be processed\n   normally upon exit from this method.\n\n   Note that "__exit__()" methods should not reraise the passed-in\n   exception; this is the caller\'s responsibility.\n\nSee also:\n\n  **PEP 0343** - The "with" statement\n     The specification, background, and examples for the Python "with"\n     statement.\n',
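As a hedged illustration of the "__enter__()"/"__exit__()" protocol documented above, a small hand-written context manager (the class name and the print messages are invented for the example):

   class ManagedResource(object):
       def __enter__(self):
           print("acquiring resource")
           return self                      # bound to the "as" target, if any
       def __exit__(self, exc_type, exc_value, traceback):
           print("releasing resource")
           return False                     # do not suppress exceptions

   with ManagedResource() as res:
       print("using %r" % res)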
  'continue': u'\nThe "continue" statement\n************************\n\n   continue_stmt ::= "continue"\n\n"continue" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition or "finally"\nclause within that loop.  It continues with the next cycle of the\nnearest enclosing loop.\n\nWhen "continue" passes control out of a "try" statement with a\n"finally" clause, that "finally" clause is executed before really\nstarting the next loop cycle.\n',
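A short sketch of the interaction between "continue" and a "finally" clause described above, assuming nothing beyond the stated semantics:

   for i in range(3):
       try:
           if i == 1:
               continue                # the "finally" clause still runs first
           print("processing %d" % i)
       finally:
           print("finally for %d" % i)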
  'conversions': u'\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," the arguments\nare coerced using the coercion rules listed at  Coercion rules.  If\nboth arguments are standard numeric types, the following coercions are\napplied:\n\n* If either argument is a complex number, the other is converted to\n  complex;\n\n* otherwise, if either argument is a floating point number, the\n  other is converted to floating point;\n\n* otherwise, if either argument is a long integer, the other is\n  converted to long integer;\n\n* otherwise, both must be plain integers and no conversion is\n  necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator). Extensions can define their own\ncoercions.\n',
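A small sketch of the coercion order listed above using only built-in numeric types; the last line uses the Python 2 long-literal syntax and is not valid in Python 3:

   print(type(2 + 3))          # both plain integers -> int
   print(type(2 + 3.0))        # int and float      -> float
   print(type(2 + 3j))         # int and complex    -> complex
   print(type(2 + 3L))         # int and long       -> long (Python 2 only)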
  'customization': u'\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n   Called to create a new instance of class *cls*.  "__new__()" is a\n   static method (special-cased so you need not declare it as such)\n   that takes the class of which an instance was requested as its\n   first argument.  The remaining arguments are those passed to the\n   object constructor expression (the call to the class).  The return\n   value of "__new__()" should be the new object instance (usually an\n   instance of *cls*).\n\n   Typical implementations create a new instance of the class by\n   invoking the superclass\'s "__new__()" method using\n   "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n   arguments and then modifying the newly-created instance as\n   necessary before returning it.\n\n   If "__new__()" returns an instance of *cls*, then the new\n   instance\'s "__init__()" method will be invoked like\n   "__init__(self[, ...])", where *self* is the new instance and the\n   remaining arguments are the same as were passed to "__new__()".\n\n   If "__new__()" does not return an instance of *cls*, then the new\n   instance\'s "__init__()" method will not be invoked.\n\n   "__new__()" is intended mainly to allow subclasses of immutable\n   types (like int, str, or tuple) to customize instance creation.  It\n   is also commonly overridden in custom metaclasses in order to\n   customize class creation.\n\nobject.__init__(self[, ...])\n\n   Called after the instance has been created (by "__new__()"), but\n   before it is returned to the caller.  The arguments are those\n   passed to the class constructor expression.  If a base class has an\n   "__init__()" method, the derived class\'s "__init__()" method, if\n   any, must explicitly call it to ensure proper initialization of the\n   base class part of the instance; for example:\n   "BaseClass.__init__(self, [args...])".\n\n   Because "__new__()" and "__init__()" work together in constructing\n   objects ("__new__()" to create it, and "__init__()" to customise\n   it), no non-"None" value may be returned by "__init__()"; doing so\n   will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n   Called when the instance is about to be destroyed.  This is also\n   called a destructor.  If a base class has a "__del__()" method, the\n   derived class\'s "__del__()" method, if any, must explicitly call it\n   to ensure proper deletion of the base class part of the instance.\n   Note that it is possible (though not recommended!) for the\n   "__del__()" method to postpone destruction of the instance by\n   creating a new reference to it.  It may then be called at a later\n   time when this new reference is deleted.  It is not guaranteed that\n   "__del__()" methods are called for objects that still exist when\n   the interpreter exits.\n\n   Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n     decrements the reference count for "x" by one, and the latter is\n     only called when "x"\'s reference count reaches zero.  
Some common\n     situations that may prevent the reference count of an object from\n     going to zero include: circular references between objects (e.g.,\n     a doubly-linked list or a tree data structure with parent and\n     child pointers); a reference to the object on the stack frame of\n     a function that caught an exception (the traceback stored in\n     "sys.exc_traceback" keeps the stack frame alive); or a reference\n     to the object on the stack frame that raised an unhandled\n     exception in interactive mode (the traceback stored in\n     "sys.last_traceback" keeps the stack frame alive).  The first\n     situation can only be remedied by explicitly breaking the cycles;\n     the latter two situations can be resolved by storing "None" in\n     "sys.exc_traceback" or "sys.last_traceback".  Circular references\n     which are garbage are detected when the option cycle detector is\n     enabled (it\'s on by default), but can only be cleaned up if there\n     are no Python-level "__del__()" methods involved. Refer to the\n     documentation for the "gc" module for more information about how\n     "__del__()" methods are handled by the cycle detector,\n     particularly the description of the "garbage" value.\n\n   Warning: Due to the precarious circumstances under which\n     "__del__()" methods are invoked, exceptions that occur during\n     their execution are ignored, and a warning is printed to\n     "sys.stderr" instead. Also, when "__del__()" is invoked in\n     response to a module being deleted (e.g., when execution of the\n     program is done), other globals referenced by the "__del__()"\n     method may already have been deleted or in the process of being\n     torn down (e.g. the import machinery shutting down).  For this\n     reason, "__del__()" methods should do the absolute minimum needed\n     to maintain external invariants.  Starting with version 1.5,\n     Python guarantees that globals whose name begins with a single\n     underscore are deleted from their module before other globals are\n     deleted; if no other references to such globals exist, this may\n     help in assuring that imported modules are still available at the\n     time when the "__del__()" method is called.\n\n   See also the "-R" command-line option.\n\nobject.__repr__(self)\n\n   Called by the "repr()" built-in function and by string conversions\n   (reverse quotes) to compute the "official" string representation of\n   an object.  If at all possible, this should look like a valid\n   Python expression that could be used to recreate an object with the\n   same value (given an appropriate environment).  If this is not\n   possible, a string of the form "<...some useful description...>"\n   should be returned.  The return value must be a string object. If a\n   class defines "__repr__()" but not "__str__()", then "__repr__()"\n   is also used when an "informal" string representation of instances\n   of that class is required.\n\n   This is typically used for debugging, so it is important that the\n   representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n   Called by the "str()" built-in function and by the "print"\n   statement to compute the "informal" string representation of an\n   object.  This differs from "__repr__()" in that it does not have to\n   be a valid Python expression: a more convenient or concise\n   representation may be used instead. 
The return value must be a\n   string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n   New in version 2.1.\n\n   These are the so-called "rich comparison" methods, and are called\n   for comparison operators in preference to "__cmp__()" below. The\n   correspondence between operator symbols and method names is as\n   follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n   "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call "x.__ne__(y)",\n   "x>y" calls "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n   A rich comparison method may return the singleton "NotImplemented"\n   if it does not implement the operation for a given pair of\n   arguments. By convention, "False" and "True" are returned for a\n   successful comparison. However, these methods can return any value,\n   so if the comparison operator is used in a Boolean context (e.g.,\n   in the condition of an "if" statement), Python will call "bool()"\n   on the value to determine if the result is true or false.\n\n   There are no implied relationships among the comparison operators.\n   The truth of "x==y" does not imply that "x!=y" is false.\n   Accordingly, when defining "__eq__()", one should also define\n   "__ne__()" so that the operators will behave as expected.  See the\n   paragraph on "__hash__()" for some important notes on creating\n   *hashable* objects which support custom comparison operations and\n   are usable as dictionary keys.\n\n   There are no swapped-argument versions of these methods (to be used\n   when the left argument does not support the operation but the right\n   argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n   reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n   and "__eq__()" and "__ne__()" are their own reflection.\n\n   Arguments to rich comparison methods are never coerced.\n\n   To automatically generate ordering operations from a single root\n   operation, see "functools.total_ordering()".\n\nobject.__cmp__(self, other)\n\n   Called by comparison operations if rich comparison (see above) is\n   not defined.  Should return a negative integer if "self < other",\n   zero if "self == other", a positive integer if "self > other".  If\n   no "__cmp__()", "__eq__()" or "__ne__()" operation is defined,\n   class instances are compared by object identity ("address").  See\n   also the description of "__hash__()" for some important notes on\n   creating *hashable* objects which support custom comparison\n   operations and are usable as dictionary keys. (Note: the\n   restriction that exceptions are not propagated by "__cmp__()" has\n   been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n   Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n   Called by built-in function "hash()" and for operations on members\n   of hashed collections including "set", "frozenset", and "dict".\n   "__hash__()" should return an integer.  The only required property\n   is that objects which compare equal have the same hash value; it is\n   advised to somehow mix together (e.g. 
using exclusive or) the hash\n   values for the components of the object that also play a part in\n   comparison of objects.\n\n   If a class does not define a "__cmp__()" or "__eq__()" method it\n   should not define a "__hash__()" operation either; if it defines\n   "__cmp__()" or "__eq__()" but not "__hash__()", its instances will\n   not be usable in hashed collections.  If a class defines mutable\n   objects and implements a "__cmp__()" or "__eq__()" method, it\n   should not implement "__hash__()", since hashable collection\n   implementations require that a object\'s hash value is immutable (if\n   the object\'s hash value changes, it will be in the wrong hash\n   bucket).\n\n   User-defined classes have "__cmp__()" and "__hash__()" methods by\n   default; with them, all objects compare unequal (except with\n   themselves) and "x.__hash__()" returns a result derived from\n   "id(x)".\n\n   Classes which inherit a "__hash__()" method from a parent class but\n   change the meaning of "__cmp__()" or "__eq__()" such that the hash\n   value returned is no longer appropriate (e.g. by switching to a\n   value-based concept of equality instead of the default identity\n   based equality) can explicitly flag themselves as being unhashable\n   by setting "__hash__ = None" in the class definition. Doing so\n   means that not only will instances of the class raise an\n   appropriate "TypeError" when a program attempts to retrieve their\n   hash value, but they will also be correctly identified as\n   unhashable when checking "isinstance(obj, collections.Hashable)"\n   (unlike classes which define their own "__hash__()" to explicitly\n   raise "TypeError").\n\n   Changed in version 2.5: "__hash__()" may now also return a long\n   integer object; the 32-bit integer is then derived from the hash of\n   that object.\n\n   Changed in version 2.6: "__hash__" may now be set to "None" to\n   explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n   Called to implement truth value testing and the built-in operation\n   "bool()"; should return "False" or "True", or their integer\n   equivalents "0" or "1".  When this method is not defined,\n   "__len__()" is called, if it is defined, and the object is\n   considered true if its result is nonzero. If a class defines\n   neither "__len__()" nor "__nonzero__()", all its instances are\n   considered true.\n\nobject.__unicode__(self)\n\n   Called to implement "unicode()" built-in; should return a Unicode\n   object. When this method is not defined, string conversion is\n   attempted, and the result of string conversion is converted to\n   Unicode using the system default encoding.\n',
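A hedged sketch pulling together several of the methods described above ("__repr__()", "__eq__()", "__ne__()" and "__hash__()" defined consistently so instances are usable as dictionary keys); the class name and attributes are invented for the example:

   class Point(object):
       def __init__(self, x, y):
           self.x, self.y = x, y
       def __repr__(self):
           return "Point(%r, %r)" % (self.x, self.y)
       def __eq__(self, other):
           return isinstance(other, Point) and (self.x, self.y) == (other.x, other.y)
       def __ne__(self, other):
           return not self.__eq__(other)
       def __hash__(self):
           # Mix the components that also take part in comparison.
           return hash((self.x, self.y))

   print(Point(1, 2) == Point(1, 2))    # True
   print({Point(1, 2): "label"})        # usable as a dictionary key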
- 'debugger': u'\n"pdb" --- The Python Debugger\n*****************************\n\n**Source code:** Lib/pdb.py\n\n======================================================================\n\nThe module "pdb" defines an interactive source code debugger for\nPython programs.  It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame.  It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible --- it is actually defined as the class\n"Pdb". This is currently undocumented but easily understood by reading\nthe source.  The extension interface uses the modules "bdb" and "cmd".\n\nThe debugger\'s prompt is "(Pdb)". Typical usage to run a program under\ncontrol of the debugger is:\n\n   >>> import pdb\n   >>> import mymodule\n   >>> pdb.run(\'mymodule.test()\')\n   > <string>(0)?()\n   (Pdb) continue\n   > <string>(1)?()\n   (Pdb) continue\n   NameError: \'spam\'\n   > <string>(1)?()\n   (Pdb)\n\n"pdb.py" can also be invoked as a script to debug other scripts.  For\nexample:\n\n   python -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 2.4: Restarting post-mortem behavior added.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n   import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger.  You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the "c" command.\n\nThe typical usage to inspect a crashed program is:\n\n   >>> import pdb\n   >>> import mymodule\n   >>> mymodule.test()\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in ?\n     File "./mymodule.py", line 4, in test\n       test2()\n     File "./mymodule.py", line 3, in test2\n       print spam\n   NameError: spam\n   >>> pdb.pm()\n   > ./mymodule.py(3)test2()\n   -> print spam\n   (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement[, globals[, locals]])\n\n   Execute the *statement* (given as a string) under debugger control.\n   The debugger prompt appears before any code is executed; you can\n   set breakpoints and type "continue", or you can step through the\n   statement using "step" or "next" (all these commands are explained\n   below).  The optional *globals* and *locals* arguments specify the\n   environment in which the code is executed; by default the\n   dictionary of the module "__main__" is used.  (See the explanation\n   of the "exec" statement or the "eval()" built-in function.)\n\npdb.runeval(expression[, globals[, locals]])\n\n   Evaluate the *expression* (given as a string) under debugger\n   control.  When "runeval()" returns, it returns the value of the\n   expression.  Otherwise this function is similar to "run()".\n\npdb.runcall(function[, argument, ...])\n\n   Call the *function* (a function or method object, not a string)\n   with the given arguments.  When "runcall()" returns, it returns\n   whatever the function call returned.  
The debugger prompt appears\n   as soon as the function is entered.\n\npdb.set_trace()\n\n   Enter the debugger at the calling stack frame.  This is useful to\n   hard-code a breakpoint at a given point in a program, even if the\n   code is not otherwise being debugged (e.g. when an assertion\n   fails).\n\npdb.post_mortem([traceback])\n\n   Enter post-mortem debugging of the given *traceback* object.  If no\n   *traceback* is given, it uses the one of the exception that is\n   currently being handled (an exception must be being handled if the\n   default is to be used).\n\npdb.pm()\n\n   Enter post-mortem debugging of the traceback found in\n   "sys.last_traceback".\n\nThe "run*" functions and "set_trace()" are aliases for instantiating\nthe "Pdb" class and calling the method of the same name.  If you want\nto access further features, you have to do this yourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None)\n\n   "Pdb" is the debugger class.\n\n   The *completekey*, *stdin* and *stdout* arguments are passed to the\n   underlying "cmd.Cmd" class; see the description there.\n\n   The *skip* argument, if given, must be an iterable of glob-style\n   module name patterns.  The debugger will not step into frames that\n   originate in a module that matches one of these patterns. [1]\n\n   Example call to enable tracing with *skip*:\n\n      import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n   New in version 2.7: The *skip* argument.\n\n   run(statement[, globals[, locals]])\n   runeval(expression[, globals[, locals]])\n   runcall(function[, argument, ...])\n   set_trace()\n\n      See the documentation for the functions explained above.\n',
+ 'debugger': u'\n"pdb" --- The Python Debugger\n*****************************\n\n**Source code:** Lib/pdb.py\n\n======================================================================\n\nThe module "pdb" defines an interactive source code debugger for\nPython programs.  It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame.  It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible --- it is actually defined as the class\n"Pdb". This is currently undocumented but easily understood by reading\nthe source.  The extension interface uses the modules "bdb" and "cmd".\n\nThe debugger\'s prompt is "(Pdb)". Typical usage to run a program under\ncontrol of the debugger is:\n\n   >>> import pdb\n   >>> import mymodule\n   >>> pdb.run(\'mymodule.test()\')\n   > <string>(0)?()\n   (Pdb) continue\n   > <string>(1)?()\n   (Pdb) continue\n   NameError: \'spam\'\n   > <string>(1)?()\n   (Pdb)\n\n"pdb.py" can also be invoked as a script to debug other scripts.  For\nexample:\n\n   python -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 2.4: Restarting post-mortem behavior added.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n   import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger.  You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the "c" command.\n\nThe typical usage to inspect a crashed program is:\n\n   >>> import pdb\n   >>> import mymodule\n   >>> mymodule.test()\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in ?\n     File "./mymodule.py", line 4, in test\n       test2()\n     File "./mymodule.py", line 3, in test2\n       print spam\n   NameError: spam\n   >>> pdb.pm()\n   > ./mymodule.py(3)test2()\n   -> print spam\n   (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement[, globals[, locals]])\n\n   Execute the *statement* (given as a string) under debugger control.\n   The debugger prompt appears before any code is executed; you can\n   set breakpoints and type "continue", or you can step through the\n   statement using "step" or "next" (all these commands are explained\n   below).  The optional *globals* and *locals* arguments specify the\n   environment in which the code is executed; by default the\n   dictionary of the module "__main__" is used.  (See the explanation\n   of the "exec" statement or the "eval()" built-in function.)\n\npdb.runeval(expression[, globals[, locals]])\n\n   Evaluate the *expression* (given as a string) under debugger\n   control.  When "runeval()" returns, it returns the value of the\n   expression.  Otherwise this function is similar to "run()".\n\npdb.runcall(function[, argument, ...])\n\n   Call the *function* (a function or method object, not a string)\n   with the given arguments.  When "runcall()" returns, it returns\n   whatever the function call returned.  
The debugger prompt appears\n   as soon as the function is entered.\n\npdb.set_trace()\n\n   Enter the debugger at the calling stack frame.  This is useful to\n   hard-code a breakpoint at a given point in a program, even if the\n   code is not otherwise being debugged (e.g. when an assertion\n   fails).\n\npdb.post_mortem([traceback])\n\n   Enter post-mortem debugging of the given *traceback* object.  If no\n   *traceback* is given, it uses the one of the exception that is\n   currently being handled (an exception must be being handled if the\n   default is to be used).\n\npdb.pm()\n\n   Enter post-mortem debugging of the traceback found in\n   "sys.last_traceback".\n\nThe "run*" functions and "set_trace()" are aliases for instantiating\nthe "Pdb" class and calling the method of the same name.  If you want\nto access further features, you have to do this yourself:\n\nclass pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None)\n\n   "Pdb" is the debugger class.\n\n   The *completekey*, *stdin* and *stdout* arguments are passed to the\n   underlying "cmd.Cmd" class; see the description there.\n\n   The *skip* argument, if given, must be an iterable of glob-style\n   module name patterns.  The debugger will not step into frames that\n   originate in a module that matches one of these patterns. [1]\n\n   Example call to enable tracing with *skip*:\n\n      import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n   New in version 2.7: The *skip* argument.\n\n   run(statement[, globals[, locals]])\n   runeval(expression[, globals[, locals]])\n   runcall(function[, argument, ...])\n   set_trace()\n\n      See the documentation for the functions explained above.\n',
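A small usage sketch for the post-mortem entry points documented above; "mymodule" and its failing "test()" are assumed, as in the entry's own examples:

   import pdb
   import sys

   try:
       import mymodule
       mymodule.test()
   except Exception:
       # Drop into the debugger at the frame that raised.
       pdb.post_mortem(sys.exc_info()[2])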
  'del': u'\nThe "del" statement\n*******************\n\n   del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name  from the local or\nglobal namespace, depending on whether the name occurs in a "global"\nstatement in the same code block.  If the name is unbound, a\n"NameError" exception will be raised.\n\nIt is illegal to delete a name from the local namespace if it occurs\nas a free variable in a nested block.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n',
  'dict': u'\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n   dict_display       ::= "{" [key_datum_list | dict_comprehension] "}"\n   key_datum_list     ::= key_datum ("," key_datum)* [","]\n   key_datum          ::= expression ":" expression\n   dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum.  This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection The standard type hierarchy.  (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.)  Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n',
  'dynamic-features': u'\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name.  An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nIf "exec" is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n"SyntaxError" unless the exec explicitly specifies the local namespace\nfor the "exec".  (In other words, "exec obj" would be illegal, but\n"exec obj in ns" would be legal.)\n\nThe "eval()", "execfile()", and "input()" functions and the "exec"\nstatement do not have access to the full environment for resolving\nnames.  Names may be resolved in the local and global namespaces of\nthe caller.  Free variables are not resolved in the nearest enclosing\nnamespace, but in the global namespace. [1] The "exec" statement and\nthe "eval()" and "execfile()" functions have optional arguments to\noverride the global and local namespace.  If only one namespace is\nspecified, it is used for both.\n',
@@ -42,7 +43,7 @@
  'identifiers': u'\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions:\n\n   identifier ::= (letter|"_") (letter | digit | "_")*\n   letter     ::= lowercase | uppercase\n   lowercase  ::= "a"..."z"\n   uppercase  ::= "A"..."Z"\n   digit      ::= "0"..."9"\n\nIdentifiers are unlimited in length.  Case is significant.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers.  They must\nbe spelled exactly as written here:\n\n   and       del       from      not       while\n   as        elif      global    or        with\n   assert    else      if        pass      yield\n   break     except    import    print\n   class     exec      in        raise\n   continue  finally   is        return\n   def       for       lambda    try\n\nChanged in version 2.4: "None" became a constant and is now recognized\nby the compiler as a name for the built-in object "None".  Although it\nis not a keyword, you cannot assign a different object to it.\n\nChanged in version 2.5: Using "as" and "with" as identifiers triggers\na warning.  To use them as keywords, enable the "with_statement"\nfuture feature .\n\nChanged in version 2.6: "as" and "with" are full keywords.\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings.  These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n   Not imported by "from module import *".  The special identifier "_"\n   is used in the interactive interpreter to store the result of the\n   last evaluation; it is stored in the "__builtin__" module.  When\n   not in interactive mode, "_" has no special meaning and is not\n   defined. See section The import statement.\n\n   Note: The name "_" is often used in conjunction with\n     internationalization; refer to the documentation for the\n     "gettext" module for more information on this convention.\n\n"__*__"\n   System-defined names. These names are defined by the interpreter\n   and its implementation (including the standard library).  Current\n   system names are discussed in the Special method names section and\n   elsewhere.  More will likely be defined in future versions of\n   Python.  *Any* use of "__*__" names, in any context, that does not\n   follow explicitly documented use, is subject to breakage without\n   warning.\n\n"__*"\n   Class-private names.  Names in this category, when used within the\n   context of a class definition, are re-written to use a mangled form\n   to help avoid name clashes between "private" attributes of base and\n   derived classes. See section Identifiers (Names).\n',
  'if': u'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n   if_stmt ::= "if" expression ":" suite\n               ( "elif" expression ":" suite )*\n               ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section Boolean operations\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n',
  'imaginary': u'\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n   imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range.  To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., "(3+4j)".  Some examples of imaginary literals:\n\n   3.14j   10.j    10j     .001j   1e100j  3.14e-10j\n',
- 'import': u'\nThe "import" statement\n**********************\n\n   import_stmt     ::= "import" module ["as" name] ( "," module ["as" name] )*\n                   | "from" relative_module "import" identifier ["as" name]\n                   ( "," identifier ["as" name] )*\n                   | "from" relative_module "import" "(" identifier ["as" name]\n                   ( "," identifier ["as" name] )* [","] ")"\n                   | "from" module "import" "*"\n   module          ::= (identifier ".")* identifier\n   relative_module ::= "."* module | "."+\n   name            ::= identifier\n\nImport statements are executed in two steps: (1) find a module, and\ninitialize it if necessary; (2) define a name or names in the local\nnamespace (of the scope where the "import" statement occurs). The\nstatement comes in two forms differing on whether it uses the "from"\nkeyword. The first form (without "from") repeats these steps for each\nidentifier in the list. The form with "from" performs step (1) once,\nand then performs step (2) repeatedly.\n\nTo understand how step (1) occurs, one must first understand how\nPython handles hierarchical naming of modules. To help organize\nmodules and provide a hierarchy in naming, Python has a concept of\npackages. A package can contain other packages and modules while\nmodules cannot contain other modules or packages. From a file system\nperspective, packages are directories and modules are files.\n\nOnce the name of the module is known (unless otherwise specified, the\nterm "module" will refer to both packages and modules), searching for\nthe module or package can begin. The first place checked is\n"sys.modules", the cache of all modules that have been imported\npreviously. If the module is found there then it is used in step (2)\nof import.\n\nIf the module is not found in the cache, then "sys.meta_path" is\nsearched (the specification for "sys.meta_path" can be found in **PEP\n302**). The object is a list of *finder* objects which are queried in\norder as to whether they know how to load the module by calling their\n"find_module()" method with the name of the module. If the module\nhappens to be contained within a package (as denoted by the existence\nof a dot in the name), then a second argument to "find_module()" is\ngiven as the value of the "__path__" attribute from the parent package\n(everything up to the last dot in the name of the module being\nimported). If a finder can find the module it returns a *loader*\n(discussed later) or returns "None".\n\nIf none of the finders on "sys.meta_path" are able to find the module\nthen some implicitly defined finders are queried. Implementations of\nPython vary in what implicit meta path finders are defined. The one\nthey all do define, though, is one that handles "sys.path_hooks",\n"sys.path_importer_cache", and "sys.path".\n\nThe implicit finder searches for the requested module in the "paths"\nspecified in one of two places ("paths" do not have to be file system\npaths). If the module being imported is supposed to be contained\nwithin a package then the second argument passed to "find_module()",\n"__path__" on the parent package, is used as the source of paths. If\nthe module is not contained in a package then "sys.path" is used as\nthe source of paths.\n\nOnce the source of paths is chosen it is iterated over to find a\nfinder that can handle that path. The dict at\n"sys.path_importer_cache" caches finders for paths and is checked for\na finder. 
If the path does not have a finder cached then\n"sys.path_hooks" is searched by calling each object in the list with a\nsingle argument of the path, returning a finder or raises\n"ImportError". If a finder is returned then it is cached in\n"sys.path_importer_cache" and then used for that path entry. If no\nfinder can be found but the path exists then a value of "None" is\nstored in "sys.path_importer_cache" to signify that an implicit, file-\nbased finder that handles modules stored as individual files should be\nused for that path. If the path does not exist then a finder which\nalways returns "None" is placed in the cache for the path.\n\nIf no finder can find the module then "ImportError" is raised.\nOtherwise some finder returned a loader whose "load_module()" method\nis called with the name of the module to load (see **PEP 302** for the\noriginal definition of loaders). A loader has several responsibilities\nto perform on a module it loads. First, if the module already exists\nin "sys.modules" (a possibility if the loader is called outside of the\nimport machinery) then it is to use that module for initialization and\nnot a new module. But if the module does not exist in "sys.modules"\nthen it is to be added to that dict before initialization begins. If\nan error occurs during loading of the module and it was added to\n"sys.modules" it is to be removed from the dict. If an error occurs\nbut the module was already in "sys.modules" it is left in the dict.\n\nThe loader must set several attributes on the module. "__name__" is to\nbe set to the name of the module. "__file__" is to be the "path" to\nthe file unless the module is built-in (and thus listed in\n"sys.builtin_module_names") in which case the attribute is not set. If\nwhat is being imported is a package then "__path__" is to be set to a\nlist of paths to be searched when looking for modules and packages\ncontained within the package being imported. "__package__" is optional\nbut should be set to the name of package that contains the module or\npackage (the empty string is used for module not contained in a\npackage). "__loader__" is also optional but should be set to the\nloader object that is loading the module.\n\nIf an error occurs during loading then the loader raises "ImportError"\nif some other exception is not already being propagated. Otherwise the\nloader returns the module that was loaded and initialized.\n\nWhen step (1) finishes without raising an exception, step (2) can\nbegin.\n\nThe first form of "import" statement binds the module name in the\nlocal namespace to the module object, and then goes on to import the\nnext identifier, if any.  If the module name is followed by "as", the\nname following "as" is used as the local name for the module.\n\nThe "from" form does not bind the module name: it goes through the\nlist of identifiers, looks each one of them up in the module found in\nstep (1), and binds the name in the local namespace to the object thus\nfound.  As with the first form of "import", an alternate local name\ncan be supplied by specifying ""as" localname".  If a name is not\nfound, "ImportError" is raised.  If the list of identifiers is\nreplaced by a star ("\'*\'"), all public names defined in the module are\nbound in the local namespace of the "import" statement..\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named "__all__"; if defined, it must\nbe a sequence of strings which are names defined or imported by that\nmodule.  
The names given in "__all__" are all considered public and\nare required to exist.  If "__all__" is not defined, the set of public\nnames includes all names found in the module\'s namespace which do not\nbegin with an underscore character ("\'_\'"). "__all__" should contain\nthe entire public API. It is intended to avoid accidentally exporting\nitems that are not part of the API (such as library modules which were\nimported and used within the module).\n\nThe "from" form with "*" may only occur in a module scope.  If the\nwild card form of import --- "import *" --- is used in a function and\nthe function contains or is a nested block with free variables, the\ncompiler will raise a "SyntaxError".\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after "from" you\ncan specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n"from . import mod" from a module in the "pkg" package then you will\nend up importing "pkg.mod". If you execute "from ..subpkg2 import mod"\nfrom within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\nspecification for relative imports is contained within **PEP 328**.\n\n"importlib.import_module()" is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python.  The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language.  It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n   future_statement ::= "from" "__future__" "import" feature ["as" name]\n                        ("," feature ["as" name])*\n                        | "from" "__future__" "import" "(" feature ["as" name]\n                        ("," feature ["as" name])* [","] ")"\n   feature          ::= identifier\n   name             ::= identifier\n\nA future statement must appear near the top of the module.  The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 2.6 are "unicode_literals",\n"print_function", "absolute_import", "division", "generators",\n"nested_scopes" and "with_statement".  "generators", "with_statement",\n"nested_scopes" are redundant in Python version 2.6 and above because\nthey are always enabled.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code.  It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently.  
Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module "__future__", described later, and it will\nbe imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n   import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by an "exec" statement or calls to the built-in\nfunctions "compile()" and "execfile()" that occur in a module "M"\ncontaining a future statement will, by default, use the new  syntax or\nsemantics associated with the future statement.  This can, starting\nwith Python 2.2 be controlled by optional arguments to "compile()" ---\nsee the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session.  If an\ninterpreter is started with the "-i" option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also: **PEP 236** - Back to the __future__\n\n     The original proposal for the __future__ mechanism.\n',
+ 'import': u'\nThe "import" statement\n**********************\n\n   import_stmt     ::= "import" module ["as" name] ( "," module ["as" name] )*\n                   | "from" relative_module "import" identifier ["as" name]\n                   ( "," identifier ["as" name] )*\n                   | "from" relative_module "import" "(" identifier ["as" name]\n                   ( "," identifier ["as" name] )* [","] ")"\n                   | "from" module "import" "*"\n   module          ::= (identifier ".")* identifier\n   relative_module ::= "."* module | "."+\n   name            ::= identifier\n\nImport statements are executed in two steps: (1) find a module, and\ninitialize it if necessary; (2) define a name or names in the local\nnamespace (of the scope where the "import" statement occurs). The\nstatement comes in two forms differing on whether it uses the "from"\nkeyword. The first form (without "from") repeats these steps for each\nidentifier in the list. The form with "from" performs step (1) once,\nand then performs step (2) repeatedly.\n\nTo understand how step (1) occurs, one must first understand how\nPython handles hierarchical naming of modules. To help organize\nmodules and provide a hierarchy in naming, Python has a concept of\npackages. A package can contain other packages and modules while\nmodules cannot contain other modules or packages. From a file system\nperspective, packages are directories and modules are files.\n\nOnce the name of the module is known (unless otherwise specified, the\nterm "module" will refer to both packages and modules), searching for\nthe module or package can begin. The first place checked is\n"sys.modules", the cache of all modules that have been imported\npreviously. If the module is found there then it is used in step (2)\nof import.\n\nIf the module is not found in the cache, then "sys.meta_path" is\nsearched (the specification for "sys.meta_path" can be found in **PEP\n302**). The object is a list of *finder* objects which are queried in\norder as to whether they know how to load the module by calling their\n"find_module()" method with the name of the module. If the module\nhappens to be contained within a package (as denoted by the existence\nof a dot in the name), then a second argument to "find_module()" is\ngiven as the value of the "__path__" attribute from the parent package\n(everything up to the last dot in the name of the module being\nimported). If a finder can find the module it returns a *loader*\n(discussed later) or returns "None".\n\nIf none of the finders on "sys.meta_path" are able to find the module\nthen some implicitly defined finders are queried. Implementations of\nPython vary in what implicit meta path finders are defined. The one\nthey all do define, though, is one that handles "sys.path_hooks",\n"sys.path_importer_cache", and "sys.path".\n\nThe implicit finder searches for the requested module in the "paths"\nspecified in one of two places ("paths" do not have to be file system\npaths). If the module being imported is supposed to be contained\nwithin a package then the second argument passed to "find_module()",\n"__path__" on the parent package, is used as the source of paths. If\nthe module is not contained in a package then "sys.path" is used as\nthe source of paths.\n\nOnce the source of paths is chosen it is iterated over to find a\nfinder that can handle that path. The dict at\n"sys.path_importer_cache" caches finders for paths and is checked for\na finder. 
If the path does not have a finder cached then\n"sys.path_hooks" is searched by calling each object in the list with a\nsingle argument of the path, returning a finder or raises\n"ImportError". If a finder is returned then it is cached in\n"sys.path_importer_cache" and then used for that path entry. If no\nfinder can be found but the path exists then a value of "None" is\nstored in "sys.path_importer_cache" to signify that an implicit, file-\nbased finder that handles modules stored as individual files should be\nused for that path. If the path does not exist then a finder which\nalways returns "None" is placed in the cache for the path.\n\nIf no finder can find the module then "ImportError" is raised.\nOtherwise some finder returned a loader whose "load_module()" method\nis called with the name of the module to load (see **PEP 302** for the\noriginal definition of loaders). A loader has several responsibilities\nto perform on a module it loads. First, if the module already exists\nin "sys.modules" (a possibility if the loader is called outside of the\nimport machinery) then it is to use that module for initialization and\nnot a new module. But if the module does not exist in "sys.modules"\nthen it is to be added to that dict before initialization begins. If\nan error occurs during loading of the module and it was added to\n"sys.modules" it is to be removed from the dict. If an error occurs\nbut the module was already in "sys.modules" it is left in the dict.\n\nThe loader must set several attributes on the module. "__name__" is to\nbe set to the name of the module. "__file__" is to be the "path" to\nthe file unless the module is built-in (and thus listed in\n"sys.builtin_module_names") in which case the attribute is not set. If\nwhat is being imported is a package then "__path__" is to be set to a\nlist of paths to be searched when looking for modules and packages\ncontained within the package being imported. "__package__" is optional\nbut should be set to the name of package that contains the module or\npackage (the empty string is used for module not contained in a\npackage). "__loader__" is also optional but should be set to the\nloader object that is loading the module.\n\nIf an error occurs during loading then the loader raises "ImportError"\nif some other exception is not already being propagated. Otherwise the\nloader returns the module that was loaded and initialized.\n\nWhen step (1) finishes without raising an exception, step (2) can\nbegin.\n\nThe first form of "import" statement binds the module name in the\nlocal namespace to the module object, and then goes on to import the\nnext identifier, if any.  If the module name is followed by "as", the\nname following "as" is used as the local name for the module.\n\nThe "from" form does not bind the module name: it goes through the\nlist of identifiers, looks each one of them up in the module found in\nstep (1), and binds the name in the local namespace to the object thus\nfound.  As with the first form of "import", an alternate local name\ncan be supplied by specifying ""as" localname".  If a name is not\nfound, "ImportError" is raised.  If the list of identifiers is\nreplaced by a star ("\'*\'"), all public names defined in the module are\nbound in the local namespace of the "import" statement..\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named "__all__"; if defined, it must\nbe a sequence of strings which are names defined or imported by that\nmodule.  
The names given in "__all__" are all considered public and\nare required to exist.  If "__all__" is not defined, the set of public\nnames includes all names found in the module\'s namespace which do not\nbegin with an underscore character ("\'_\'"). "__all__" should contain\nthe entire public API. It is intended to avoid accidentally exporting\nitems that are not part of the API (such as library modules which were\nimported and used within the module).\n\nThe "from" form with "*" may only occur in a module scope.  If the\nwild card form of import --- "import *" --- is used in a function and\nthe function contains or is a nested block with free variables, the\ncompiler will raise a "SyntaxError".\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after "from" you\ncan specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n"from . import mod" from a module in the "pkg" package then you will\nend up importing "pkg.mod". If you execute "from ..subpkg2 import mod"\nfrom within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\nspecification for relative imports is contained within **PEP 328**.\n\n"importlib.import_module()" is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python.  The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language.  It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n   future_statement ::= "from" "__future__" "import" feature ["as" name]\n                        ("," feature ["as" name])*\n                        | "from" "__future__" "import" "(" feature ["as" name]\n                        ("," feature ["as" name])* [","] ")"\n   feature          ::= identifier\n   name             ::= identifier\n\nA future statement must appear near the top of the module.  The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 2.6 are "unicode_literals",\n"print_function", "absolute_import", "division", "generators",\n"nested_scopes" and "with_statement".  "generators", "with_statement",\n"nested_scopes" are redundant in Python version 2.6 and above because\nthey are always enabled.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code.  It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently.  
Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module "__future__", described later, and it will\nbe imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n   import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by an "exec" statement or calls to the built-in\nfunctions "compile()" and "execfile()" that occur in a module "M"\ncontaining a future statement will, by default, use the new  syntax or\nsemantics associated with the future statement.  This can, starting\nwith Python 2.2 be controlled by optional arguments to "compile()" ---\nsee the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session.  If an\ninterpreter is started with the "-i" option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also:\n\n  **PEP 236** - Back to the __future__\n     The original proposal for the __future__ mechanism.\n',
  'in': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation.  Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n   comparison    ::= or_expr ( comp_operator or_expr )*\n   comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n                     | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe forms "<>" and "!=" are equivalent; for consistency with C, "!="\nis preferred; where "!=" is mentioned below "<>" is also accepted.\nThe "<>" spelling is considered obsolescent.\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects.  The objects need not have the same type. If both are\nnumbers, they are converted to a common type.  Otherwise, objects of\ndifferent types *always* compare unequal, and are ordered consistently\nbut arbitrarily. You can control comparison behavior of objects of\nnon-built-in types by defining a "__cmp__" method or rich comparison\nmethods like "__gt__", described in section Special method names.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the "in" and "not in"\noperators. In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric\n  equivalents (the result of the built-in function "ord()") of their\n  characters. Unicode and 8-bit strings are fully interoperable in\n  this behavior. [4]\n\n* Tuples and lists are compared lexicographically using comparison\n  of corresponding elements.  This means that to compare equal, each\n  element must compare equal and the two sequences must be of the same\n  type and have the same length.\n\n  If not equal, the sequences are ordered the same as their first\n  differing elements.  For example, "cmp([1,2,x], [1,2,y])" returns\n  the same as "cmp(x,y)".  If the corresponding element does not\n  exist, the shorter sequence is ordered first (for example, "[1,2] <\n  [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n  (key, value) lists compare equal. [5] Outcomes other than equality\n  are resolved consistently, but are not otherwise defined. [6]\n\n* Most other objects of built-in types compare unequal unless they\n  are the same object; the choice whether one object is considered\n  smaller or larger than another one is made arbitrarily but\n  consistently within one execution of a program.\n\nThe operators "in" and "not in" test for collection membership.  
"x in\ns" evaluates to true if *x* is a member of the collection *s*, and\nfalse otherwise.  "x not in s" returns the negation of "x in s". The\ncollection membership test has traditionally been bound to sequences;\nan object is a member of a collection if the collection is a sequence\nand contains an element equal to that object.  However, it make sense\nfor many other object types to support membership tests without being\na sequence.  In particular, dictionaries (for keys) and sets support\nmembership testing.\n\nFor the list and tuple types, "x in y" is true if and only if there\nexists an index *i* such that "x == y[i]" is true.\n\nFor the Unicode and string types, "x in y" is true if and only if *x*\nis a substring of *y*.  An equivalent test is "y.find(x) != -1".\nNote, *x* and *y* need not be the same type; consequently, "u\'ab\' in\n\'abc\'" will return "True". Empty strings are always considered to be a\nsubstring of any other string, so """ in "abc"" will return "True".\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength "1".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y".  If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object.  "x is not y"\nyields the inverse truth value. [7]\n',
  'integers': u'\nInteger and long integer literals\n*********************************\n\nInteger and long integer literals are described by the following\nlexical definitions:\n\n   longinteger    ::= integer ("l" | "L")\n   integer        ::= decimalinteger | octinteger | hexinteger | bininteger\n   decimalinteger ::= nonzerodigit digit* | "0"\n   octinteger     ::= "0" ("o" | "O") octdigit+ | "0" octdigit+\n   hexinteger     ::= "0" ("x" | "X") hexdigit+\n   bininteger     ::= "0" ("b" | "B") bindigit+\n   nonzerodigit   ::= "1"..."9"\n   octdigit       ::= "0"..."7"\n   bindigit       ::= "0" | "1"\n   hexdigit       ::= digit | "a"..."f" | "A"..."F"\n\nAlthough both lower case "\'l\'" and upper case "\'L\'" are allowed as\nsuffix for long integers, it is strongly recommended to always use\n"\'L\'", since the letter "\'l\'" looks too much like the digit "\'1\'".\n\nPlain integer literals that are above the largest representable plain\ninteger (e.g., 2147483647 when using 32-bit arithmetic) are accepted\nas if they were long integers instead. [1]  There is no limit for long\ninteger literals apart from what can be stored in available memory.\n\nSome examples of plain integer literals (first row) and long integer\nliterals (second and third rows):\n\n   7     2147483647                        0177\n   3L    79228162514264337593543950336L    0377L   0x100000000L\n         79228162514264337593543950336             0xdeadbeef\n',
  'lambda': u'\nLambdas\n*******\n\n   lambda_expr     ::= "lambda" [parameter_list]: expression\n   old_lambda_expr ::= "lambda" [parameter_list]: old_expression\n\nLambda expressions (sometimes called lambda forms) have the same\nsyntactic position as expressions.  They are a shorthand to create\nanonymous functions; the expression "lambda arguments: expression"\nyields a function object.  The unnamed object behaves like a function\nobject defined with\n\n   def name(arguments):\n       return expression\n\nSee section Function definitions for the syntax of parameter lists.\nNote that functions created with lambda expressions cannot contain\nstatements.\n',
@@ -61,20 +62,20 @@
  'shifting': u'\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n   shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept plain or long integers as arguments.  The\narguments are converted to a common type.  They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as division by "pow(2, n)".  A\nleft shift by *n* bits is defined as multiplication with "pow(2, n)".\nNegative shift counts raise a "ValueError" exception.\n\nNote: In the current implementation, the right-hand operand is\n  required to be at most "sys.maxsize".  If the right-hand operand is\n  larger than "sys.maxsize" an "OverflowError" exception is raised.\n',
  'slicings': u'\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list).  Slicings may be used as expressions or as\ntargets in assignment or "del" statements.  The syntax for a slicing:\n\n   slicing          ::= simple_slicing | extended_slicing\n   simple_slicing   ::= primary "[" short_slice "]"\n   extended_slicing ::= primary "[" slice_list "]"\n   slice_list       ::= slice_item ("," slice_item)* [","]\n   slice_item       ::= expression | proper_slice | ellipsis\n   proper_slice     ::= short_slice | long_slice\n   short_slice      ::= [lower_bound] ":" [upper_bound]\n   long_slice       ::= short_slice ":" [stride]\n   lower_bound      ::= expression\n   upper_bound      ::= expression\n   stride           ::= expression\n   ellipsis         ::= "..."\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing.  Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice nor ellipses).  Similarly, when the slice\nlist has exactly one short slice and no trailing comma, the\ninterpretation as a simple slicing takes priority over that as an\nextended slicing.\n\nThe semantics for a simple slicing are as follows.  The primary must\nevaluate to a sequence object.  The lower and upper bound expressions,\nif present, must evaluate to plain integers; defaults are zero and the\n"sys.maxint", respectively.  If either bound is negative, the\nsequence\'s length is added to it.  The slicing now selects all items\nwith index *k* such that "i <= k < j" where *i* and *j* are the\nspecified lower and upper bounds.  This may be an empty sequence.  It\nis not an error if *i* or *j* lie outside the range of valid indexes\n(such items don\'t exist so they aren\'t selected).\n\nThe semantics for an extended slicing are as follows.  The primary\nmust evaluate to a mapping object, and it is indexed with a key that\nis constructed from the slice list, as follows.  If the slice list\ncontains at least one comma, the key is a tuple containing the\nconversion of the slice items; otherwise, the conversion of the lone\nslice item is the key.  The conversion of a slice item that is an\nexpression is that expression.  The conversion of an ellipsis slice\nitem is the built-in "Ellipsis" object.  The conversion of a proper\nslice is a slice object (see section The standard type hierarchy)\nwhose "start", "stop" and "step" attributes are the values of the\nexpressions given as lower bound, upper bound and stride,\nrespectively, substituting "None" for missing expressions.\n',
  'specialattrs': u'\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant.  Some of these are not reported\nby the "dir()" built-in function.\n\nobject.__dict__\n\n   A dictionary or other mapping object used to store an object\'s\n   (writable) attributes.\n\nobject.__methods__\n\n   Deprecated since version 2.2: Use the built-in function "dir()" to\n   get a list of an object\'s attributes. This attribute is no longer\n   available.\n\nobject.__members__\n\n   Deprecated since version 2.2: Use the built-in function "dir()" to\n   get a list of an object\'s attributes. This attribute is no longer\n   available.\n\ninstance.__class__\n\n   The class to which a class instance belongs.\n\nclass.__bases__\n\n   The tuple of base classes of a class object.\n\nclass.__name__\n\n   The name of the class or type.\n\nThe following attributes are only supported by *new-style class*es.\n\nclass.__mro__\n\n   This attribute is a tuple of classes that are considered when\n   looking for base classes during method resolution.\n\nclass.mro()\n\n   This method can be overridden by a metaclass to customize the\n   method resolution order for its instances.  It is called at class\n   instantiation, and its result is stored in "__mro__".\n\nclass.__subclasses__()\n\n   Each new-style class keeps a list of weak references to its\n   immediate subclasses.  This method returns a list of all those\n   references still alive. Example:\n\n      >>> int.__subclasses__()\n      [<type \'bool\'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found\n    in the Python Reference Manual (Basic customization).\n\n[2] As a consequence, the list "[1, 2]" is considered equal to\n    "[1.0, 2.0]", and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n    operands.\n\n[4] Cased characters are those with general category property\n    being one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase),\n    or "Lt" (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a\n    singleton tuple whose only element is the tuple to be formatted.\n\n[6] The advantage of leaving the newline on is that returning an\n    empty string is then an unambiguous EOF indication.  It is also\n    possible (in cases where it might matter, for example, if you want\n    to make an exact copy of a file while scanning its lines) to tell\n    whether the last line of a file ended in a newline or not (yes\n    this happens!).\n',
- 'specialnames': u'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators.  For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "x.__getitem__(i)" for old-style\nclasses and "type(x).__getitem__(x, i)" for new-style classes.  Except\nwhere mentioned, attempts to execute an operation raise an exception\nwhen no appropriate method is defined (typically "AttributeError" or\n"TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled.  For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense.  (One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n   Called to create a new instance of class *cls*.  "__new__()" is a\n   static method (special-cased so you need not declare it as such)\n   that takes the class of which an instance was requested as its\n   first argument.  The remaining arguments are those passed to the\n   object constructor expression (the call to the class).  The return\n   value of "__new__()" should be the new object instance (usually an\n   instance of *cls*).\n\n   Typical implementations create a new instance of the class by\n   invoking the superclass\'s "__new__()" method using\n   "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n   arguments and then modifying the newly-created instance as\n   necessary before returning it.\n\n   If "__new__()" returns an instance of *cls*, then the new\n   instance\'s "__init__()" method will be invoked like\n   "__init__(self[, ...])", where *self* is the new instance and the\n   remaining arguments are the same as were passed to "__new__()".\n\n   If "__new__()" does not return an instance of *cls*, then the new\n   instance\'s "__init__()" method will not be invoked.\n\n   "__new__()" is intended mainly to allow subclasses of immutable\n   types (like int, str, or tuple) to customize instance creation.  It\n   is also commonly overridden in custom metaclasses in order to\n   customize class creation.\n\nobject.__init__(self[, ...])\n\n   Called after the instance has been created (by "__new__()"), but\n   before it is returned to the caller.  The arguments are those\n   passed to the class constructor expression.  If a base class has an\n   "__init__()" method, the derived class\'s "__init__()" method, if\n   any, must explicitly call it to ensure proper initialization of the\n   base class part of the instance; for example:\n   "BaseClass.__init__(self, [args...])".\n\n   Because "__new__()" and "__init__()" work together in constructing\n   objects ("__new__()" to create it, and "__init__()" to customise\n   it), no non-"None" value may be returned by "__init__()"; doing so\n   will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n   Called when the instance is about to be destroyed.  This is also\n   called a destructor.  
If a base class has a "__del__()" method, the\n   derived class\'s "__del__()" method, if any, must explicitly call it\n   to ensure proper deletion of the base class part of the instance.\n   Note that it is possible (though not recommended!) for the\n   "__del__()" method to postpone destruction of the instance by\n   creating a new reference to it.  It may then be called at a later\n   time when this new reference is deleted.  It is not guaranteed that\n   "__del__()" methods are called for objects that still exist when\n   the interpreter exits.\n\n   Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n     decrements the reference count for "x" by one, and the latter is\n     only called when "x"\'s reference count reaches zero.  Some common\n     situations that may prevent the reference count of an object from\n     going to zero include: circular references between objects (e.g.,\n     a doubly-linked list or a tree data structure with parent and\n     child pointers); a reference to the object on the stack frame of\n     a function that caught an exception (the traceback stored in\n     "sys.exc_traceback" keeps the stack frame alive); or a reference\n     to the object on the stack frame that raised an unhandled\n     exception in interactive mode (the traceback stored in\n     "sys.last_traceback" keeps the stack frame alive).  The first\n     situation can only be remedied by explicitly breaking the cycles;\n     the latter two situations can be resolved by storing "None" in\n     "sys.exc_traceback" or "sys.last_traceback".  Circular references\n     which are garbage are detected when the option cycle detector is\n     enabled (it\'s on by default), but can only be cleaned up if there\n     are no Python-level "__del__()" methods involved. Refer to the\n     documentation for the "gc" module for more information about how\n     "__del__()" methods are handled by the cycle detector,\n     particularly the description of the "garbage" value.\n\n   Warning: Due to the precarious circumstances under which\n     "__del__()" methods are invoked, exceptions that occur during\n     their execution are ignored, and a warning is printed to\n     "sys.stderr" instead. Also, when "__del__()" is invoked in\n     response to a module being deleted (e.g., when execution of the\n     program is done), other globals referenced by the "__del__()"\n     method may already have been deleted or in the process of being\n     torn down (e.g. the import machinery shutting down).  For this\n     reason, "__del__()" methods should do the absolute minimum needed\n     to maintain external invariants.  Starting with version 1.5,\n     Python guarantees that globals whose name begins with a single\n     underscore are deleted from their module before other globals are\n     deleted; if no other references to such globals exist, this may\n     help in assuring that imported modules are still available at the\n     time when the "__del__()" method is called.\n\n   See also the "-R" command-line option.\n\nobject.__repr__(self)\n\n   Called by the "repr()" built-in function and by string conversions\n   (reverse quotes) to compute the "official" string representation of\n   an object.  If at all possible, this should look like a valid\n   Python expression that could be used to recreate an object with the\n   same value (given an appropriate environment).  If this is not\n   possible, a string of the form "<...some useful description...>"\n   should be returned.  
The return value must be a string object. If a\n   class defines "__repr__()" but not "__str__()", then "__repr__()"\n   is also used when an "informal" string representation of instances\n   of that class is required.\n\n   This is typically used for debugging, so it is important that the\n   representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n   Called by the "str()" built-in function and by the "print"\n   statement to compute the "informal" string representation of an\n   object.  This differs from "__repr__()" in that it does not have to\n   be a valid Python expression: a more convenient or concise\n   representation may be used instead. The return value must be a\n   string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n   New in version 2.1.\n\n   These are the so-called "rich comparison" methods, and are called\n   for comparison operators in preference to "__cmp__()" below. The\n   correspondence between operator symbols and method names is as\n   follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n   "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call "x.__ne__(y)",\n   "x>y" calls "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n   A rich comparison method may return the singleton "NotImplemented"\n   if it does not implement the operation for a given pair of\n   arguments. By convention, "False" and "True" are returned for a\n   successful comparison. However, these methods can return any value,\n   so if the comparison operator is used in a Boolean context (e.g.,\n   in the condition of an "if" statement), Python will call "bool()"\n   on the value to determine if the result is true or false.\n\n   There are no implied relationships among the comparison operators.\n   The truth of "x==y" does not imply that "x!=y" is false.\n   Accordingly, when defining "__eq__()", one should also define\n   "__ne__()" so that the operators will behave as expected.  See the\n   paragraph on "__hash__()" for some important notes on creating\n   *hashable* objects which support custom comparison operations and\n   are usable as dictionary keys.\n\n   There are no swapped-argument versions of these methods (to be used\n   when the left argument does not support the operation but the right\n   argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n   reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n   and "__eq__()" and "__ne__()" are their own reflection.\n\n   Arguments to rich comparison methods are never coerced.\n\n   To automatically generate ordering operations from a single root\n   operation, see "functools.total_ordering()".\n\nobject.__cmp__(self, other)\n\n   Called by comparison operations if rich comparison (see above) is\n   not defined.  Should return a negative integer if "self < other",\n   zero if "self == other", a positive integer if "self > other".  If\n   no "__cmp__()", "__eq__()" or "__ne__()" operation is defined,\n   class instances are compared by object identity ("address").  See\n   also the description of "__hash__()" for some important notes on\n   creating *hashable* objects which support custom comparison\n   operations and are usable as dictionary keys. 
(Note: the\n   restriction that exceptions are not propagated by "__cmp__()" has\n   been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n   Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n   Called by built-in function "hash()" and for operations on members\n   of hashed collections including "set", "frozenset", and "dict".\n   "__hash__()" should return an integer.  The only required property\n   is that objects which compare equal have the same hash value; it is\n   advised to somehow mix together (e.g. using exclusive or) the hash\n   values for the components of the object that also play a part in\n   comparison of objects.\n\n   If a class does not define a "__cmp__()" or "__eq__()" method it\n   should not define a "__hash__()" operation either; if it defines\n   "__cmp__()" or "__eq__()" but not "__hash__()", its instances will\n   not be usable in hashed collections.  If a class defines mutable\n   objects and implements a "__cmp__()" or "__eq__()" method, it\n   should not implement "__hash__()", since hashable collection\n   implementations require that a object\'s hash value is immutable (if\n   the object\'s hash value changes, it will be in the wrong hash\n   bucket).\n\n   User-defined classes have "__cmp__()" and "__hash__()" methods by\n   default; with them, all objects compare unequal (except with\n   themselves) and "x.__hash__()" returns a result derived from\n   "id(x)".\n\n   Classes which inherit a "__hash__()" method from a parent class but\n   change the meaning of "__cmp__()" or "__eq__()" such that the hash\n   value returned is no longer appropriate (e.g. by switching to a\n   value-based concept of equality instead of the default identity\n   based equality) can explicitly flag themselves as being unhashable\n   by setting "__hash__ = None" in the class definition. Doing so\n   means that not only will instances of the class raise an\n   appropriate "TypeError" when a program attempts to retrieve their\n   hash value, but they will also be correctly identified as\n   unhashable when checking "isinstance(obj, collections.Hashable)"\n   (unlike classes which define their own "__hash__()" to explicitly\n   raise "TypeError").\n\n   Changed in version 2.5: "__hash__()" may now also return a long\n   integer object; the 32-bit integer is then derived from the hash of\n   that object.\n\n   Changed in version 2.6: "__hash__" may now be set to "None" to\n   explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n   Called to implement truth value testing and the built-in operation\n   "bool()"; should return "False" or "True", or their integer\n   equivalents "0" or "1".  When this method is not defined,\n   "__len__()" is called, if it is defined, and the object is\n   considered true if its result is nonzero. If a class defines\n   neither "__len__()" nor "__nonzero__()", all its instances are\n   considered true.\n\nobject.__unicode__(self)\n\n   Called to implement "unicode()" built-in; should return a Unicode\n   object. 
When this method is not defined, string conversion is\n   attempted, and the result of string conversion is converted to\n   Unicode using the system default encoding.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n   Called when an attribute lookup has not found the attribute in the\n   usual places (i.e. it is not an instance attribute nor is it found\n   in the class tree for "self").  "name" is the attribute name. This\n   method should return the (computed) attribute value or raise an\n   "AttributeError" exception.\n\n   Note that if the attribute is found through the normal mechanism,\n   "__getattr__()" is not called.  (This is an intentional asymmetry\n   between "__getattr__()" and "__setattr__()".) This is done both for\n   efficiency reasons and because otherwise "__getattr__()" would have\n   no way to access other attributes of the instance.  Note that at\n   least for instance variables, you can fake total control by not\n   inserting any values in the instance attribute dictionary (but\n   instead inserting them in another object).  See the\n   "__getattribute__()" method below for a way to actually get total\n   control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n   Called when an attribute assignment is attempted.  This is called\n   instead of the normal mechanism (i.e. store the value in the\n   instance dictionary).  *name* is the attribute name, *value* is the\n   value to be assigned to it.\n\n   If "__setattr__()" wants to assign to an instance attribute, it\n   should not simply execute "self.name = value" --- this would cause\n   a recursive call to itself.  Instead, it should insert the value in\n   the dictionary of instance attributes, e.g., "self.__dict__[name] =\n   value".  For new-style classes, rather than accessing the instance\n   dictionary, it should call the base class method with the same\n   name, for example, "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n   Like "__setattr__()" but for attribute deletion instead of\n   assignment.  This should only be implemented if "del obj.name" is\n   meaningful for the object.\n\n\nMore attribute access for new-style classes\n-------------------------------------------\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n   Called unconditionally to implement attribute accesses for\n   instances of the class. If the class also defines "__getattr__()",\n   the latter will not be called unless "__getattribute__()" either\n   calls it explicitly or raises an "AttributeError". This method\n   should return the (computed) attribute value or raise an\n   "AttributeError" exception. In order to avoid infinite recursion in\n   this method, its implementation should always call the base class\n   method with the same name to access any attributes it needs, for\n   example, "object.__getattribute__(self, name)".\n\n   Note: This method may still be bypassed when looking up special\n     methods as the result of implicit invocation via language syntax\n     or built-in functions. 
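As a rough sketch of the pattern just described (the "Record" class and its "_data" attribute are invented for illustration), "__setattr__()" writes through to an internal dictionary while "__getattr__()" only serves names that normal lookup misses:

   class Record(object):
       def __init__(self, data):
           # Bypass our own __setattr__ while creating the storage dict.
           object.__setattr__(self, '_data', dict(data))

       def __getattr__(self, name):
           # Only called when normal attribute lookup fails.
           try:
               return self._data[name]
           except KeyError:
               raise AttributeError(name)

       def __setattr__(self, name, value):
           # Store in the internal dict rather than assigning with
           # "self.name = value", which would recurse into __setattr__.
           self._data[name] = value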
See Special method lookup for new-style\n     classes.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents).  In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n   Called to get the attribute of the owner class (class attribute\n   access) or of an instance of that class (instance attribute\n   access). *owner* is always the owner class, while *instance* is the\n   instance that the attribute was accessed through, or "None" when\n   the attribute is accessed through the *owner*.  This method should\n   return the (computed) attribute value or raise an "AttributeError"\n   exception.\n\nobject.__set__(self, instance, value)\n\n   Called to set the attribute on an instance *instance* of the owner\n   class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n   Called to delete the attribute on an instance *instance* of the\n   owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol:  "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead.  Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.  Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass "object()" or "type()").\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n   The simplest and least common call is when user code directly\n   invokes a descriptor method:    "x.__get__(a)".\n\nInstance Binding\n   If binding to a new-style object instance, "a.x" is transformed\n   into the call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n   If binding to a new-style class, "A.x" is transformed into the\n   call: "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n   If "a" is an instance of "super", then the binding "super(B,\n   obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n   immediately preceding "B" and then invokes the descriptor with the\n   call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined.  A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()".  If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary.  
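The following hypothetical data descriptor (the names "Typed" and "Widget" are invented for this sketch) shows the "__get__()"/"__set__()" protocol described above, storing the value in the owning instance's "__dict__" under the attribute's name:

   class Typed(object):
       def __init__(self, name, kind):
           self.name, self.kind = name, kind

       def __get__(self, instance, owner):
           if instance is None:
               return self          # accessed on the owner class itself
           return instance.__dict__.get(self.name)

       def __set__(self, instance, value):
           if not isinstance(value, self.kind):
               raise TypeError('%s must be %s' % (self.name, self.kind.__name__))
           instance.__dict__[self.name] = value

   class Widget(object):
       size = Typed('size', int)    # Widget().size = 'big' raises TypeError

Because "Typed" defines "__set__()", it is a data descriptor and keeps precedence over the per-instance "__dict__" entry it writes, as explained below.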
If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor.  Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method.  Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary.  In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors.  Accordingly, instances can\nredefine and override methods.  This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage.  This wastes space for objects\nhaving very few instance variables.  The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition.  The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable.  Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n   This class variable can be assigned a string, iterable, or sequence\n   of strings with variable names used by instances.  If defined in a\n   new-style class, *__slots__* reserves space for the declared\n   variables and prevents the automatic creation of *__dict__* and\n   *__weakref__* for each instance.\n\n   New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n  attribute of that class will always be accessible, so a *__slots__*\n  definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n  variables not listed in the *__slots__* definition.  Attempts to\n  assign to an unlisted variable name raises "AttributeError". If\n  dynamic assignment of new variables is desired, then add\n  "\'__dict__\'" to the sequence of strings in the *__slots__*\n  declaration.\n\n  Changed in version 2.3: Previously, adding "\'__dict__\'" to the\n  *__slots__* declaration would not enable the assignment of new\n  attributes not specifically listed in the sequence of instance\n  variable names.\n\n* Without a *__weakref__* variable for each instance, classes\n  defining *__slots__* do not support weak references to its\n  instances. If weak reference support is needed, then add\n  "\'__weakref__\'" to the sequence of strings in the *__slots__*\n  declaration.\n\n  Changed in version 2.3: Previously, adding "\'__weakref__\'" to the\n  *__slots__* declaration would not enable support for weak\n  references.\n\n* *__slots__* are implemented at the class level by creating\n  descriptors (Implementing Descriptors) for each variable name.  As a\n  result, class attributes cannot be used to set default values for\n  instance variables defined by *__slots__*; otherwise, the class\n  attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n  where it is defined.  
As a result, subclasses will have a *__dict__*\n  unless they also define *__slots__* (which must only contain names\n  of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n  instance variable defined by the base class slot is inaccessible\n  (except by retrieving its descriptor directly from the base class).\n  This renders the meaning of the program undefined.  In the future, a\n  check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n  "variable-length" built-in types such as "long", "str" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n  may also be used; however, in the future, special meaning may be\n  assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n  *__slots__*.\n\n  Changed in version 2.6: Previously, *__class__* assignment raised an\n  error if either new or old class had *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, new-style classes are constructed using "type()". A class\ndefinition is read into a separate namespace and the value of class\nname is bound to the result of "type(name, bases, dict)".\n\nWhen the class definition is read, if *__metaclass__* is defined then\nthe callable assigned to it will be called instead of "type()". This\nallows classes or functions to be written which monitor or alter the\nclass creation process:\n\n* Modifying the class dictionary prior to the class being created.\n\n* Returning an instance of another class -- essentially performing\n  the role of a factory function.\n\nThese steps will have to be performed in the metaclass\'s "__new__()"\nmethod -- "type.__new__()" can then be called from this method to\ncreate a class with different properties.  This example adds a new\nelement to the class dictionary before creating the class:\n\n   class metacls(type):\n       def __new__(mcs, name, bases, dict):\n           dict[\'foo\'] = \'metacls was here\'\n           return type.__new__(mcs, name, bases, dict)\n\nYou can of course also override other class methods (or add new\nmethods); for example defining a custom "__call__()" method in the\nmetaclass allows custom behavior when the class is called, e.g. not\nalways creating a new instance.\n\n__metaclass__\n\n   This variable can be any callable accepting arguments for "name",\n   "bases", and "dict".  Upon class creation, the callable is used\n   instead of the built-in "type()".\n\n   New in version 2.2.\n\nThe appropriate metaclass is determined by the following precedence\nrules:\n\n* If "dict[\'__metaclass__\']" exists, it is used.\n\n* Otherwise, if there is at least one base class, its metaclass is\n  used (this looks for a *__class__* attribute first and if not found,\n  uses its type).\n\n* Otherwise, if a global variable named __metaclass__ exists, it is\n  used.\n\n* Otherwise, the old-style, classic metaclass (types.ClassType) is\n  used.\n\nThe potential uses for metaclasses are boundless. 
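For instance, continuing the hypothetical "metacls" shown above (the class name "Example" is invented for this doctest-style sketch), a class opts in simply by assigning *__metaclass__* in its body:

   >>> class metacls(type):
   ...     def __new__(mcs, name, bases, dict):
   ...         dict['foo'] = 'metacls was here'
   ...         return type.__new__(mcs, name, bases, dict)
   ...
   >>> class Example(object):
   ...     __metaclass__ = metacls
   ...
   >>> Example.foo
   'metacls was here'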
Some ideas that have\nbeen explored including logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\n\nCustomizing instance and subclass checks\n========================================\n\nNew in version 2.6.\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n   Return true if *instance* should be considered a (direct or\n   indirect) instance of *class*. If defined, called to implement\n   "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n   Return true if *subclass* should be considered a (direct or\n   indirect) subclass of *class*.  If defined, called to implement\n   "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass.  They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also: **PEP 3119** - Introducing Abstract Base Classes\n\n     Includes the specification for customizing "isinstance()" and\n     "issubclass()" behavior through "__instancecheck__()" and\n     "__subclasscheck__()", with motivation for this functionality in\n     the context of adding Abstract Base Classes (see the "abc"\n     module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n   Called when the instance is "called" as a function; if this method\n   is defined, "x(arg1, arg2, ...)" is a shorthand for\n   "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well.  The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. (For backwards compatibility, the method\n"__getslice__()" (see below) can also be defined to handle simple, but\nnot extended slices.) It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "has_key()", "get()",\n"clear()", "setdefault()", "iterkeys()", "itervalues()",\n"iteritems()", "pop()", "popitem()", "copy()", and "update()" behaving\nsimilar to those for Python\'s standard dictionary objects.  The\n"UserDict" module provides a "DictMixin" class to help create those\nmethods from a base set of "__getitem__()", "__setitem__()",\n"__delitem__()", and "keys()". Mutable sequences should provide\nmethods "append()", "count()", "index()", "extend()", "insert()",\n"pop()", "remove()", "reverse()" and "sort()", like Python standard\nlist objects.  
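A small illustrative example of the "__call__()" hook described above (the "Adder" class is invented for this sketch):

   >>> class Adder(object):
   ...     def __init__(self, n):
   ...         self.n = n
   ...     def __call__(self, x):
   ...         return self.n + x
   ...
   >>> add_three = Adder(3)
   >>> add_three(4)        # equivalent to add_three.__call__(4)
   7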
Finally, sequence types should implement addition\n(meaning concatenation) and multiplication (meaning repetition) by\ndefining the methods "__add__()", "__radd__()", "__iadd__()",\n"__mul__()", "__rmul__()" and "__imul__()" described below; they\nshould not define "__coerce__()" or other numerical operators.  It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should be equivalent of "has_key()"; for sequences,\nit should search through the values.  It is further recommended that\nboth mappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "iterkeys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n   Called to implement the built-in function "len()".  Should return\n   the length of the object, an integer ">=" 0.  Also, an object that\n   doesn\'t define a "__nonzero__()" method and whose "__len__()"\n   method returns zero is considered to be false in a Boolean context.\n\nobject.__getitem__(self, key)\n\n   Called to implement evaluation of "self[key]". For sequence types,\n   the accepted keys should be integers and slice objects.  Note that\n   the special interpretation of negative indexes (if the class wishes\n   to emulate a sequence type) is up to the "__getitem__()" method. If\n   *key* is of an inappropriate type, "TypeError" may be raised; if of\n   a value outside the set of indexes for the sequence (after any\n   special interpretation of negative values), "IndexError" should be\n   raised. For mapping types, if *key* is missing (not in the\n   container), "KeyError" should be raised.\n\n   Note: "for" loops expect that an "IndexError" will be raised for\n     illegal indexes to allow proper detection of the end of the\n     sequence.\n\nobject.__missing__(self, key)\n\n   Called by "dict"."__getitem__()" to implement "self[key]" for dict\n   subclasses when key is not in the dictionary.\n\nobject.__setitem__(self, key, value)\n\n   Called to implement assignment to "self[key]".  Same note as for\n   "__getitem__()".  This should only be implemented for mappings if\n   the objects support changes to the values for keys, or if new keys\n   can be added, or for sequences if elements can be replaced.  The\n   same exceptions should be raised for improper *key* values as for\n   the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n   Called to implement deletion of "self[key]".  Same note as for\n   "__getitem__()".  This should only be implemented for mappings if\n   the objects support removal of keys, or for sequences if elements\n   can be removed from the sequence.  The same exceptions should be\n   raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n   This method is called when an iterator is required for a container.\n   This method should return a new iterator object that can iterate\n   over all the objects in the container.  For mappings, it should\n   iterate over the keys of the container, and should also be made\n   available as the method "iterkeys()".\n\n   Iterator objects also need to implement this method; they are\n   required to return themselves.  For more information on iterator\n   objects, see Iterator Types.\n\nobject.__reversed__(self)\n\n   Called (if present) by the "reversed()" built-in to implement\n   reverse iteration.  
It should return a new iterator object that\n   iterates over all the objects in the container in reverse order.\n\n   If the "__reversed__()" method is not provided, the "reversed()"\n   built-in will fall back to using the sequence protocol ("__len__()"\n   and "__getitem__()").  Objects that support the sequence protocol\n   should only provide "__reversed__()" if they can provide an\n   implementation that is more efficient than the one provided by\n   "reversed()".\n\n   New in version 2.6.\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence.  However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n   Called to implement membership test operators.  Should return true\n   if *item* is in *self*, false otherwise.  For mapping objects, this\n   should consider the keys of the mapping rather than the values or\n   the key-item pairs.\n\n   For objects that don\'t define "__contains__()", the membership test\n   first tries iteration via "__iter__()", then the old sequence\n   iteration protocol via "__getitem__()", see this section in the\n   language reference.\n\n\nAdditional methods for emulation of sequence types\n==================================================\n\nThe following optional methods can be defined to further emulate\nsequence objects.  Immutable sequences methods should at most only\ndefine "__getslice__()"; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n   Deprecated since version 2.0: Support slice objects as parameters\n   to the "__getitem__()" method. (However, built-in types in CPython\n   currently still implement "__getslice__()".  Therefore, you have to\n   override it in derived classes when implementing slicing.)\n\n   Called to implement evaluation of "self[i:j]". The returned object\n   should be of the same type as *self*.  Note that missing *i* or *j*\n   in the slice expression are replaced by zero or "sys.maxsize",\n   respectively.  If negative indexes are used in the slice, the\n   length of the sequence is added to that index. If the instance does\n   not implement the "__len__()" method, an "AttributeError" is\n   raised. No guarantee is made that indexes adjusted this way are not\n   still negative.  Indexes which are greater than the length of the\n   sequence are not modified. If no "__getslice__()" is found, a slice\n   object is created instead, and passed to "__getitem__()" instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n   Called to implement assignment to "self[i:j]". Same notes for *i*\n   and *j* as for "__getslice__()".\n\n   This method is deprecated. If no "__setslice__()" is found, or for\n   extended slicing of the form "self[i:j:k]", a slice object is\n   created, and passed to "__setitem__()", instead of "__setslice__()"\n   being called.\n\nobject.__delslice__(self, i, j)\n\n   Called to implement deletion of "self[i:j]". Same notes for *i* and\n   *j* as for "__getslice__()". This method is deprecated. If no\n   "__delslice__()" is found, or for extended slicing of the form\n   "self[i:j:k]", a slice object is created, and passed to\n   "__delitem__()", instead of "__delslice__()" being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available.  
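Tying the basic container methods described earlier together, a hypothetical read-only sequence (the "Squares" class is invented for this sketch) needs little more than "__len__()" and "__getitem__()"; iteration and membership tests then fall back to the sequence protocol:

   >>> class Squares(object):
   ...     def __init__(self, n):
   ...         self.n = n
   ...     def __len__(self):
   ...         return self.n
   ...     def __getitem__(self, i):
   ...         if not 0 <= i < self.n:
   ...             raise IndexError(i)
   ...         return i * i
   ...
   >>> s = Squares(5)
   >>> len(s), s[3]
   (5, 9)
   >>> list(s)             # iteration falls back to __getitem__
   [0, 1, 4, 9, 16]
   >>> 16 in s             # so does the membership test
   True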
For slice\noperations involving extended slice notation, or in absence of the\nslice methods, "__getitem__()", "__setitem__()" or "__delitem__()" is\ncalled with a slice object as argument.\n\nThe following example demonstrate how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n"__getitem__()", "__setitem__()" and "__delitem__()" support slice\nobjects as arguments):\n\n   class MyClass:\n       ...\n       def __getitem__(self, index):\n           ...\n       def __setitem__(self, index, value):\n           ...\n       def __delitem__(self, index):\n           ...\n\n       if sys.version_info < (2, 0):\n           # They won\'t be defined if version is at least 2.0 final\n\n           def __getslice__(self, i, j):\n               return self[max(0, i):max(0, j):]\n           def __setslice__(self, i, j, seq):\n               self[max(0, i):max(0, j):] = seq\n           def __delslice__(self, i, j):\n               del self[max(0, i):max(0, j):]\n       ...\n\nNote the calls to "max()"; these are necessary because of the handling\nof negative indices before the "__*slice__()" methods are called.\nWhen negative indexes are used, the "__*item__()" methods receive them\nas provided, but the "__*slice__()" methods get a "cooked" form of the\nindex values.  For each negative index value, the length of the\nsequence is added to the index before calling the method (which may\nstill result in a negative index); this is the customary handling of\nnegative indexes by the built-in sequence types, and the "__*item__()"\nmethods are expected to do this as well.  However, since they should\nalready be doing that, negative indexes cannot be passed in; they must\nbe constrained to the bounds of the sequence before being passed to\nthe "__*item__()" methods. Calling "max(0, i)" conveniently returns\nthe proper value.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n   These methods are called to implement the binary arithmetic\n   operations ("+", "-", "*", "//", "%", "divmod()", "pow()", "**",\n   "<<", ">>", "&", "^", "|").  For instance, to evaluate the\n   expression "x + y", where *x* is an instance of a class that has an\n   "__add__()" method, "x.__add__(y)" is called.  The "__divmod__()"\n   method should be the equivalent to using "__floordiv__()" and\n   "__mod__()"; it should not be related to "__truediv__()" (described\n   below).  Note that "__pow__()" should be defined to accept an\n   optional third argument if the ternary version of the built-in\n   "pow()" function is to be supported.\n\n   If one of those methods does not support the operation with the\n   supplied arguments, it should return "NotImplemented".\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n   The division operator ("/") is implemented by these methods.  
The\n   "__truediv__()" method is used when "__future__.division" is in\n   effect, otherwise "__div__()" is used.  If only one of these two\n   methods is defined, the object will not support division in the\n   alternate context; "TypeError" will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n   These methods are called to implement the binary arithmetic\n   operations ("+", "-", "*", "/", "%", "divmod()", "pow()", "**",\n   "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n   These functions are only called if the left operand does not\n   support the corresponding operation and the operands are of\n   different types. [2] For instance, to evaluate the expression "x -\n   y", where *y* is an instance of a class that has an "__rsub__()"\n   method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n   *NotImplemented*.\n\n   Note that ternary "pow()" will not try calling "__rpow__()" (the\n   coercion rules would become too complicated).\n\n   Note: If the right operand\'s type is a subclass of the left\n     operand\'s type and that subclass provides the reflected method\n     for the operation, this method will be called before the left\n     operand\'s non-reflected method.  This behavior allows subclasses\n     to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n   These methods are called to implement the augmented arithmetic\n   assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n   ">>=", "&=", "^=", "|=").  These methods should attempt to do the\n   operation in-place (modifying *self*) and return the result (which\n   could be, but does not have to be, *self*).  If a specific method\n   is not defined, the augmented assignment falls back to the normal\n   methods.  For instance, to execute the statement "x += y", where\n   *x* is an instance of a class that has an "__iadd__()" method,\n   "x.__iadd__(y)" is called.  If *x* is an instance of a class that\n   does not define a "__iadd__()" method, "x.__add__(y)" and\n   "y.__radd__(x)" are considered, as with the evaluation of "x + y".\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n   Called to implement the unary arithmetic operations ("-", "+",\n   "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n   Called to implement the built-in functions "complex()", "int()",\n   "long()", and "float()".  Should return a value of the appropriate\n   type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n   Called to implement the built-in functions "oct()" and "hex()".\n   Should return a string value.\n\nobject.__index__(self)\n\n   Called to implement "operator.index()".  
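A hedged sketch of the binary and reflected arithmetic methods above (the "Money" class and its "cents" field are invented for illustration); unsupported operand types return "NotImplemented" so the other operand gets a chance:

   >>> class Money(object):
   ...     def __init__(self, cents):
   ...         self.cents = cents
   ...     def __repr__(self):
   ...         return 'Money(%d)' % self.cents
   ...     def __add__(self, other):
   ...         if isinstance(other, Money):
   ...             return Money(self.cents + other.cents)
   ...         if isinstance(other, (int, long)):
   ...             return Money(self.cents + other)
   ...         return NotImplemented
   ...     __radd__ = __add__      # addition is commutative here
   ...
   >>> Money(50) + 25
   Money(75)
   >>> 25 + Money(50)      # int.__add__ returns NotImplemented, so __radd__ runs
   Money(75)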
Also called whenever\n   Python needs an integer object (such as in slicing).  Must return\n   an integer (int or long).\n\n   New in version 2.5.\n\nobject.__coerce__(self, other)\n\n   Called to implement "mixed-mode" numeric arithmetic.  Should either\n   return a 2-tuple containing *self* and *other* converted to a\n   common numeric type, or "None" if conversion is impossible.  When\n   the common type would be the type of "other", it is sufficient to\n   return "None", since the interpreter will also ask the other object\n   to attempt a coercion (but sometimes, if the implementation of the\n   other type cannot be changed, it is useful to do the conversion to\n   the other type here).  A return value of "NotImplemented" is\n   equivalent to returning "None".\n\n\nCoercion rules\n==============\n\nThis section used to document the rules for coercion.  As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable.  Instead, here are some informal\nguidelines regarding coercion.  In Python 3, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n  no coercion takes place and the string formatting operation is\n  invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n  mode operations on types that don\'t define coercion pass the\n  original arguments to the operation.\n\n* New-style classes (those derived from "object") never invoke the\n  "__coerce__()" method in response to a binary operator; the only\n  time "__coerce__()" is invoked is when the built-in function\n  "coerce()" is called.\n\n* For most intents and purposes, an operator that returns\n  "NotImplemented" is treated the same as one that is not implemented\n  at all.\n\n* Below, "__op__()" and "__rop__()" are used to signify the generic\n  method names corresponding to an operator; "__iop__()" is used for\n  the corresponding in-place operator.  For example, for the operator\n  \'"+"\', "__add__()" and "__radd__()" are used for the left and right\n  variant of the binary operator, and "__iadd__()" for the in-place\n  variant.\n\n* For objects *x* and *y*, first "x.__op__(y)" is tried.  If this is\n  not implemented or returns "NotImplemented", "y.__rop__(x)" is\n  tried.  If this is also not implemented or returns "NotImplemented",\n  a "TypeError" exception is raised.  But see the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n  of a built-in type or a new-style class, and the right operand is an\n  instance of a proper subclass of that type or class and overrides\n  the base\'s "__rop__()" method, the right operand\'s "__rop__()"\n  method is tried *before* the left operand\'s "__op__()" method.\n\n  This is done so that a subclass can completely override binary\n  operators. Otherwise, the left operand\'s "__op__()" method would\n  always accept the right operand: when an instance of a given class\n  is expected, an instance of a subclass of that class is always\n  acceptable.\n\n* When either operand type defines a coercion, this coercion is\n  called before that type\'s "__op__()" or "__rop__()" method is\n  called, but no sooner.  
If the coercion returns an object of a\n  different type for the operand whose coercion is invoked, part of\n  the process is redone using the new object.\n\n* When an in-place operator (like \'"+="\') is used, if the left\n  operand implements "__iop__()", it is invoked without any coercion.\n  When the operation falls back to "__op__()" and/or "__rop__()", the\n  normal coercion rules apply.\n\n* In "x + y", if *x* is a sequence that implements sequence\n  concatenation, sequence concatenation is invoked.\n\n* In "x * y", if one operand is a sequence that implements sequence\n  repetition, and the other is an integer ("int" or "long"), sequence\n  repetition is invoked.\n\n* Rich comparisons (implemented by methods "__eq__()" and so on)\n  never use coercion.  Three-way comparison (implemented by\n  "__cmp__()") does use coercion under the same conditions as other\n  binary operations use it.\n\n* In the current implementation, the built-in numeric types "int",\n  "long", "float", and "complex" do not use coercion. All these types\n  implement a "__coerce__()" method, for use by the built-in\n  "coerce()" function.\n\n  Changed in version 2.7: The complex type no longer makes implicit\n  calls to the "__coerce__()" method for mixed-type binary arithmetic\n  operations.\n\n\nWith Statement Context Managers\n===============================\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code.  Context managers are normally\ninvoked using the "with" statement (described in section The with\nstatement), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see Context Manager Types.\n\nobject.__enter__(self)\n\n   Enter the runtime context related to this object. The "with"\n   statement will bind this method\'s return value to the target(s)\n   specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n   Exit the runtime context related to this object. The parameters\n   describe the exception that caused the context to be exited. If the\n   context was exited without an exception, all three arguments will\n   be "None".\n\n   If an exception is supplied, and the method wishes to suppress the\n   exception (i.e., prevent it from being propagated), it should\n   return a true value. Otherwise, the exception will be processed\n   normally upon exit from this method.\n\n   Note that "__exit__()" methods should not reraise the passed-in\n   exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n     The specification, background, and examples for the Python "with"\n     statement.\n\n\nSpecial method lookup for old-style classes\n===========================================\n\nFor old-style classes, special methods are always looked up in exactly\nthe same way as any other method or attribute. 
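Stepping back to the context manager protocol above, a minimal doctest-style sketch of "__enter__()" and "__exit__()" (the "Timer" class is invented for this example):

   >>> import time
   >>> class Timer(object):
   ...     def __enter__(self):
   ...         self.start = time.time()
   ...         return self
   ...     def __exit__(self, exc_type, exc_value, traceback):
   ...         self.elapsed = time.time() - self.start
   ...         return False        # do not suppress exceptions
   ...
   >>> with Timer() as t:
   ...     time.sleep(0.01)
   ...
   >>> t.elapsed > 0
   True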
This is the case\nregardless of whether the method is being looked up explicitly as in\n"x.__getitem__(i)" or implicitly as in "x[i]".\n\nThis behaviour means that special methods may exhibit different\nbehaviour for different instances of a single old-style class if the\nappropriate special attributes are set differently:\n\n   >>> class C:\n   ...     pass\n   ...\n   >>> c1 = C()\n   >>> c2 = C()\n   >>> c1.__len__ = lambda: 5\n   >>> c2.__len__ = lambda: 9\n   >>> len(c1)\n   5\n   >>> len(c2)\n   9\n\n\nSpecial method lookup for new-style classes\n===========================================\n\nFor new-style classes, implicit invocations of special methods are\nonly guaranteed to work correctly if defined on an object\'s type, not\nin the object\'s instance dictionary.  That behaviour is the reason why\nthe following code raises an exception (unlike the equivalent example\nwith old-style classes):\n\n   >>> class C(object):\n   ...     pass\n   ...\n   >>> c = C()\n   >>> c.__len__ = lambda: 5\n   >>> len(c)\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in <module>\n   TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n   >>> 1 .__hash__() == hash(1)\n   True\n   >>> int.__hash__() == hash(int)\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in <module>\n   TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n   >>> type(1).__hash__(1) == hash(1)\n   True\n   >>> type(int).__hash__(int) == hash(int)\n   True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n   >>> class Meta(type):\n   ...    def __getattribute__(*args):\n   ...       print "Metaclass getattribute invoked"\n   ...       return type.__getattribute__(*args)\n   ...\n   >>> class C(object):\n   ...     __metaclass__ = Meta\n   ...     def __len__(self):\n   ...         return 10\n   ...     def __getattribute__(*args):\n   ...         print "Class getattribute invoked"\n   ...         return object.__getattribute__(*args)\n   ...\n   >>> c = C()\n   >>> c.__len__()                 # Explicit lookup via instance\n   Class getattribute invoked\n   10\n   >>> type(c).__len__(c)          # Explicit lookup via type\n   Metaclass getattribute invoked\n   10\n   >>> len(c)                      # Implicit lookup\n   10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type,\n    under certain controlled conditions. 
It generally isn\'t a good\n    idea though, since it can lead to some very strange behaviour if\n    it is handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n    reflected method (such as "__add__()") fails the operation is not\n    supported, which is why the reflected method is not called.\n',
- 'string-methods': u'\nString Methods\n**************\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support.  Some of them are also available on\n"bytearray" objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange section. To output formatted strings use\ntemplate strings or the "%" operator described in the String\nFormatting Operations section. Also, see the "re" module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n   Return a copy of the string with its first character capitalized\n   and the rest lowercased.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n   Return centered in a string of length *width*. Padding is done\n   using the specified *fillchar* (default is a space).\n\n   Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n   Return the number of non-overlapping occurrences of substring *sub*\n   in the range [*start*, *end*].  Optional arguments *start* and\n   *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n   Decodes the string using the codec registered for *encoding*.\n   *encoding* defaults to the default string encoding.  *errors* may\n   be given to set a different error handling scheme.  The default is\n   "\'strict\'", meaning that encoding errors raise "UnicodeError".\n   Other possible values are "\'ignore\'", "\'replace\'" and any other\n   name registered via "codecs.register_error()", see section Codec\n   Base Classes.\n\n   New in version 2.2.\n\n   Changed in version 2.3: Support for other error handling schemes\n   added.\n\n   Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n   Return an encoded version of the string.  Default encoding is the\n   current default string encoding.  *errors* may be given to set a\n   different error handling scheme.  The default for *errors* is\n   "\'strict\'", meaning that encoding errors raise a "UnicodeError".\n   Other possible values are "\'ignore\'", "\'replace\'",\n   "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any other name\n   registered via "codecs.register_error()", see section Codec Base\n   Classes. For a list of possible encodings, see section Standard\n   Encodings.\n\n   New in version 2.0.\n\n   Changed in version 2.3: Support for "\'xmlcharrefreplace\'" and\n   "\'backslashreplace\'" and other error handling schemes added.\n\n   Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n   Return "True" if the string ends with the specified *suffix*,\n   otherwise return "False".  *suffix* can also be a tuple of suffixes\n   to look for.  With optional *start*, test beginning at that\n   position.  With optional *end*, stop comparing at that position.\n\n   Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n   Return a copy of the string where all tab characters are replaced\n   by one or more spaces, depending on the current column and the\n   given tab size.  Tab positions occur every *tabsize* characters\n   (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n   To expand the string, the current column is set to zero and the\n   string is examined character by character.  
If the character is a\n   tab ("\\t"), one or more space characters are inserted in the result\n   until the current column is equal to the next tab position. (The\n   tab character itself is not copied.)  If the character is a newline\n   ("\\n") or return ("\\r"), it is copied and the current column is\n   reset to zero.  Any other character is copied unchanged and the\n   current column is incremented by one regardless of how the\n   character is represented when printed.\n\n   >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n   \'01      012     0123    01234\'\n   >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n   \'01  012 0123    01234\'\n\nstr.find(sub[, start[, end]])\n\n   Return the lowest index in the string where substring *sub* is\n   found, such that *sub* is contained in the slice "s[start:end]".\n   Optional arguments *start* and *end* are interpreted as in slice\n   notation.  Return "-1" if *sub* is not found.\n\n   Note: The "find()" method should be used only if you need to know\n     the position of *sub*.  To check if *sub* is a substring or not,\n     use the "in" operator:\n\n        >>> \'Py\' in \'Python\'\n        True\n\nstr.format(*args, **kwargs)\n\n   Perform a string formatting operation.  The string on which this\n   method is called can contain literal text or replacement fields\n   delimited by braces "{}".  Each replacement field contains either\n   the numeric index of a positional argument, or the name of a\n   keyword argument.  Returns a copy of the string where each\n   replacement field is replaced with the string value of the\n   corresponding argument.\n\n   >>> "The sum of 1 + 2 is {0}".format(1+2)\n   \'The sum of 1 + 2 is 3\'\n\n   See Format String Syntax for a description of the various\n   formatting options that can be specified in format strings.\n\n   This method of string formatting is the new standard in Python 3,\n   and should be preferred to the "%" formatting described in String\n   Formatting Operations in new code.\n\n   New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n   Like "find()", but raise "ValueError" when the substring is not\n   found.\n\nstr.isalnum()\n\n   Return true if all characters in the string are alphanumeric and\n   there is at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n   Return true if all characters in the string are alphabetic and\n   there is at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n   Return true if all characters in the string are digits and there is\n   at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n   Return true if all cased characters [4] in the string are lowercase\n   and there is at least one cased character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n   Return true if there are only whitespace characters in the string\n   and there is at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n   Return true if the string is a titlecased string and there is at\n   least one character, for example uppercase characters may only\n   follow uncased characters and lowercase characters only cased ones.\n   Return false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n   Return true if all cased characters [4] in the string are 
uppercase\n   and there is at least one cased character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n   Return a string which is the concatenation of the strings in the\n   *iterable* *iterable*.  The separator between elements is the\n   string providing this method.\n\nstr.ljust(width[, fillchar])\n\n   Return the string left justified in a string of length *width*.\n   Padding is done using the specified *fillchar* (default is a\n   space).  The original string is returned if *width* is less than or\n   equal to "len(s)".\n\n   Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n   Return a copy of the string with all the cased characters [4]\n   converted to lowercase.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n   Return a copy of the string with leading characters removed.  The\n   *chars* argument is a string specifying the set of characters to be\n   removed.  If omitted or "None", the *chars* argument defaults to\n   removing whitespace.  The *chars* argument is not a prefix; rather,\n   all combinations of its values are stripped:\n\n   >>> \'   spacious   \'.lstrip()\n   \'spacious   \'\n   >>> \'www.example.com\'.lstrip(\'cmowz.\')\n   \'example.com\'\n\n   Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n   Split the string at the first occurrence of *sep*, and return a\n   3-tuple containing the part before the separator, the separator\n   itself, and the part after the separator.  If the separator is not\n   found, return a 3-tuple containing the string itself, followed by\n   two empty strings.\n\n   New in version 2.5.\n\nstr.replace(old, new[, count])\n\n   Return a copy of the string with all occurrences of substring *old*\n   replaced by *new*.  If the optional argument *count* is given, only\n   the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n   Return the highest index in the string where substring *sub* is\n   found, such that *sub* is contained within "s[start:end]".\n   Optional arguments *start* and *end* are interpreted as in slice\n   notation.  Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n   Like "rfind()" but raises "ValueError" when the substring *sub* is\n   not found.\n\nstr.rjust(width[, fillchar])\n\n   Return the string right justified in a string of length *width*.\n   Padding is done using the specified *fillchar* (default is a\n   space). The original string is returned if *width* is less than or\n   equal to "len(s)".\n\n   Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n   Split the string at the last occurrence of *sep*, and return a\n   3-tuple containing the part before the separator, the separator\n   itself, and the part after the separator.  If the separator is not\n   found, return a 3-tuple containing two empty strings, followed by\n   the string itself.\n\n   New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n   Return a list of the words in the string, using *sep* as the\n   delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n   are done, the *rightmost* ones.  If *sep* is not specified or\n   "None", any whitespace string is a separator.  Except for splitting\n   from the right, "rsplit()" behaves like "split()" which is\n   described in detail below.\n\n   New in version 2.4.\n\nstr.rstrip([chars])\n\n   Return a copy of the string with trailing characters removed.  
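For instance (the sample strings here are only illustrative), "partition()" and "rpartition()" described above split around the first and last occurrence of the separator respectively:

   >>> 'key=value'.partition('=')
   ('key', '=', 'value')
   >>> 'lib/python2.7/hashlib.py'.rpartition('/')
   ('lib/python2.7', '/', 'hashlib.py')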
The\n   *chars* argument is a string specifying the set of characters to be\n   removed.  If omitted or "None", the *chars* argument defaults to\n   removing whitespace.  The *chars* argument is not a suffix; rather,\n   all combinations of its values are stripped:\n\n   >>> \'   spacious   \'.rstrip()\n   \'   spacious\'\n   >>> \'mississippi\'.rstrip(\'ipz\')\n   \'mississ\'\n\n   Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n   Return a list of the words in the string, using *sep* as the\n   delimiter string.  If *maxsplit* is given, at most *maxsplit*\n   splits are done (thus, the list will have at most "maxsplit+1"\n   elements).  If *maxsplit* is not specified or "-1", then there is\n   no limit on the number of splits (all possible splits are made).\n\n   If *sep* is given, consecutive delimiters are not grouped together\n   and are deemed to delimit empty strings (for example,\n   "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']").  The *sep* argument\n   may consist of multiple characters (for example,\n   "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n   empty string with a specified separator returns "[\'\']".\n\n   If *sep* is not specified or is "None", a different splitting\n   algorithm is applied: runs of consecutive whitespace are regarded\n   as a single separator, and the result will contain no empty strings\n   at the start or end if the string has leading or trailing\n   whitespace.  Consequently, splitting an empty string or a string\n   consisting of just whitespace with a "None" separator returns "[]".\n\n   For example, "\' 1  2   3  \'.split()" returns "[\'1\', \'2\', \'3\']", and\n   "\'  1  2   3  \'.split(None, 1)" returns "[\'1\', \'2   3  \']".\n\nstr.splitlines([keepends])\n\n   Return a list of the lines in the string, breaking at line\n   boundaries. This method uses the *universal newlines* approach to\n   splitting lines. Line breaks are not included in the resulting list\n   unless *keepends* is given and true.\n\n   For example, "\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()" returns "[\'ab\n   c\', \'\', \'de fg\', \'kl\']", while the same call with\n   "splitlines(True)" returns "[\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']".\n\n   Unlike "split()" when a delimiter string *sep* is given, this\n   method returns an empty list for the empty string, and a terminal\n   line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n   Return "True" if string starts with the *prefix*, otherwise return\n   "False". *prefix* can also be a tuple of prefixes to look for.\n   With optional *start*, test string beginning at that position.\n   With optional *end*, stop comparing string at that position.\n\n   Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n   Return a copy of the string with the leading and trailing\n   characters removed. The *chars* argument is a string specifying the\n   set of characters to be removed. If omitted or "None", the *chars*\n   argument defaults to removing whitespace. 
The *chars* argument is\n   not a prefix or suffix; rather, all combinations of its values are\n   stripped:\n\n   >>> \'   spacious   \'.strip()\n   \'spacious\'\n   >>> \'www.example.com\'.strip(\'cmowz.\')\n   \'example\'\n\n   Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n   Return a copy of the string with uppercase characters converted to\n   lowercase and vice versa.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n   Return a titlecased version of the string where words start with an\n   uppercase character and the remaining characters are lowercase.\n\n   The algorithm uses a simple language-independent definition of a\n   word as groups of consecutive letters.  The definition works in\n   many contexts but it means that apostrophes in contractions and\n   possessives form word boundaries, which may not be the desired\n   result:\n\n      >>> "they\'re bill\'s friends from the UK".title()\n      "They\'Re Bill\'S Friends From The Uk"\n\n   A workaround for apostrophes can be constructed using regular\n   expressions:\n\n      >>> import re\n      >>> def titlecase(s):\n      ...     return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n      ...                   lambda mo: mo.group(0)[0].upper() +\n      ...                              mo.group(0)[1:].lower(),\n      ...                   s)\n      ...\n      >>> titlecase("they\'re bill\'s friends.")\n      "They\'re Bill\'s Friends."\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n   Return a copy of the string where all characters occurring in the\n   optional argument *deletechars* are removed, and the remaining\n   characters have been mapped through the given translation table,\n   which must be a string of length 256.\n\n   You can use the "maketrans()" helper function in the "string"\n   module to create a translation table. For string objects, set the\n   *table* argument to "None" for translations that only delete\n   characters:\n\n   >>> \'read this short text\'.translate(None, \'aeiou\')\n   \'rd ths shrt txt\'\n\n   New in version 2.6: Support for a "None" *table* argument.\n\n   For Unicode objects, the "translate()" method does not accept the\n   optional *deletechars* argument.  Instead, it returns a copy of the\n   *s* where all characters have been mapped through the given\n   translation table which must be a mapping of Unicode ordinals to\n   Unicode ordinals, Unicode strings or "None". Unmapped characters\n   are left untouched. Characters mapped to "None" are deleted.  Note,\n   a more flexible approach is to create a custom character mapping\n   codec using the "codecs" module (see "encodings.cp1251" for an\n   example).\n\nstr.upper()\n\n   Return a copy of the string with all the cased characters [4]\n   converted to uppercase.  Note that "str.upper().isupper()" might be\n   "False" if "s" contains uncased characters or if the Unicode\n   category of the resulting character(s) is not "Lu" (Letter,\n   uppercase), but e.g. "Lt" (Letter, titlecase).\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n   Return the numeric string left filled with zeros in a string of\n   length *width*.  A sign prefix is handled correctly.  
The original\n   string is returned if *width* is less than or equal to "len(s)".\n\n   New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n   Return "True" if there are only numeric characters in S, "False"\n   otherwise. Numeric characters include digit characters, and all\n   characters that have the Unicode numeric value property, e.g.\n   U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n   Return "True" if there are only decimal characters in S, "False"\n   otherwise. Decimal characters include digit characters, and all\n   characters that can be used to form decimal-radix numbers, e.g.\n   U+0660, ARABIC-INDIC DIGIT ZERO.\n',
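A minimal doctest-style sketch of the distinctions above (the literals are illustrative, not taken from the reference text): "zfill()" keeps the sign prefix, while "isdecimal()", "isdigit()" and "isnumeric()" accept progressively larger sets of characters.

   >>> '-42'.zfill(5)                        # sign prefix is preserved
   '-0042'
   >>> u'7'.isdecimal(), u'7'.isdigit(), u'7'.isnumeric()
   (True, True, True)
   >>> u'\u2155'.isnumeric()                 # VULGAR FRACTION ONE FIFTH
   True
   >>> u'\u2155'.isdigit(), u'\u2155'.isdecimal()
   (False, False)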
+ 'specialnames': u'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators.  For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "x.__getitem__(i)" for old-style\nclasses and "type(x).__getitem__(x, i)" for new-style classes.  Except\nwhere mentioned, attempts to execute an operation raise an exception\nwhen no appropriate method is defined (typically "AttributeError" or\n"TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled.  For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense.  (One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n   Called to create a new instance of class *cls*.  "__new__()" is a\n   static method (special-cased so you need not declare it as such)\n   that takes the class of which an instance was requested as its\n   first argument.  The remaining arguments are those passed to the\n   object constructor expression (the call to the class).  The return\n   value of "__new__()" should be the new object instance (usually an\n   instance of *cls*).\n\n   Typical implementations create a new instance of the class by\n   invoking the superclass\'s "__new__()" method using\n   "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n   arguments and then modifying the newly-created instance as\n   necessary before returning it.\n\n   If "__new__()" returns an instance of *cls*, then the new\n   instance\'s "__init__()" method will be invoked like\n   "__init__(self[, ...])", where *self* is the new instance and the\n   remaining arguments are the same as were passed to "__new__()".\n\n   If "__new__()" does not return an instance of *cls*, then the new\n   instance\'s "__init__()" method will not be invoked.\n\n   "__new__()" is intended mainly to allow subclasses of immutable\n   types (like int, str, or tuple) to customize instance creation.  It\n   is also commonly overridden in custom metaclasses in order to\n   customize class creation.\n\nobject.__init__(self[, ...])\n\n   Called after the instance has been created (by "__new__()"), but\n   before it is returned to the caller.  The arguments are those\n   passed to the class constructor expression.  If a base class has an\n   "__init__()" method, the derived class\'s "__init__()" method, if\n   any, must explicitly call it to ensure proper initialization of the\n   base class part of the instance; for example:\n   "BaseClass.__init__(self, [args...])".\n\n   Because "__new__()" and "__init__()" work together in constructing\n   objects ("__new__()" to create it, and "__init__()" to customise\n   it), no non-"None" value may be returned by "__init__()"; doing so\n   will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n   Called when the instance is about to be destroyed.  This is also\n   called a destructor.  
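The division of labour between "__new__()" and "__init__()" described above matters most for immutable types. A minimal sketch, using a hypothetical "Point" subclass of "tuple" (not an example from the reference text):

   >>> class Point(tuple):
   ...     def __new__(cls, x, y):
   ...         # the tuple contents must be chosen in __new__(); __init__()
   ...         # runs too late to change an immutable instance
   ...         return super(Point, cls).__new__(cls, (x, y))
   ...
   >>> Point(1, 2)
   (1, 2)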
If a base class has a "__del__()" method, the\n   derived class\'s "__del__()" method, if any, must explicitly call it\n   to ensure proper deletion of the base class part of the instance.\n   Note that it is possible (though not recommended!) for the\n   "__del__()" method to postpone destruction of the instance by\n   creating a new reference to it.  It may then be called at a later\n   time when this new reference is deleted.  It is not guaranteed that\n   "__del__()" methods are called for objects that still exist when\n   the interpreter exits.\n\n   Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n     decrements the reference count for "x" by one, and the latter is\n     only called when "x"\'s reference count reaches zero.  Some common\n     situations that may prevent the reference count of an object from\n     going to zero include: circular references between objects (e.g.,\n     a doubly-linked list or a tree data structure with parent and\n     child pointers); a reference to the object on the stack frame of\n     a function that caught an exception (the traceback stored in\n     "sys.exc_traceback" keeps the stack frame alive); or a reference\n     to the object on the stack frame that raised an unhandled\n     exception in interactive mode (the traceback stored in\n     "sys.last_traceback" keeps the stack frame alive).  The first\n     situation can only be remedied by explicitly breaking the cycles;\n     the latter two situations can be resolved by storing "None" in\n     "sys.exc_traceback" or "sys.last_traceback".  Circular references\n     which are garbage are detected when the option cycle detector is\n     enabled (it\'s on by default), but can only be cleaned up if there\n     are no Python-level "__del__()" methods involved. Refer to the\n     documentation for the "gc" module for more information about how\n     "__del__()" methods are handled by the cycle detector,\n     particularly the description of the "garbage" value.\n\n   Warning: Due to the precarious circumstances under which\n     "__del__()" methods are invoked, exceptions that occur during\n     their execution are ignored, and a warning is printed to\n     "sys.stderr" instead. Also, when "__del__()" is invoked in\n     response to a module being deleted (e.g., when execution of the\n     program is done), other globals referenced by the "__del__()"\n     method may already have been deleted or in the process of being\n     torn down (e.g. the import machinery shutting down).  For this\n     reason, "__del__()" methods should do the absolute minimum needed\n     to maintain external invariants.  Starting with version 1.5,\n     Python guarantees that globals whose name begins with a single\n     underscore are deleted from their module before other globals are\n     deleted; if no other references to such globals exist, this may\n     help in assuring that imported modules are still available at the\n     time when the "__del__()" method is called.\n\n   See also the "-R" command-line option.\n\nobject.__repr__(self)\n\n   Called by the "repr()" built-in function and by string conversions\n   (reverse quotes) to compute the "official" string representation of\n   an object.  If at all possible, this should look like a valid\n   Python expression that could be used to recreate an object with the\n   same value (given an appropriate environment).  If this is not\n   possible, a string of the form "<...some useful description...>"\n   should be returned.  
The return value must be a string object. If a\n   class defines "__repr__()" but not "__str__()", then "__repr__()"\n   is also used when an "informal" string representation of instances\n   of that class is required.\n\n   This is typically used for debugging, so it is important that the\n   representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n   Called by the "str()" built-in function and by the "print"\n   statement to compute the "informal" string representation of an\n   object.  This differs from "__repr__()" in that it does not have to\n   be a valid Python expression: a more convenient or concise\n   representation may be used instead. The return value must be a\n   string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n   New in version 2.1.\n\n   These are the so-called "rich comparison" methods, and are called\n   for comparison operators in preference to "__cmp__()" below. The\n   correspondence between operator symbols and method names is as\n   follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n   "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call "x.__ne__(y)",\n   "x>y" calls "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n   A rich comparison method may return the singleton "NotImplemented"\n   if it does not implement the operation for a given pair of\n   arguments. By convention, "False" and "True" are returned for a\n   successful comparison. However, these methods can return any value,\n   so if the comparison operator is used in a Boolean context (e.g.,\n   in the condition of an "if" statement), Python will call "bool()"\n   on the value to determine if the result is true or false.\n\n   There are no implied relationships among the comparison operators.\n   The truth of "x==y" does not imply that "x!=y" is false.\n   Accordingly, when defining "__eq__()", one should also define\n   "__ne__()" so that the operators will behave as expected.  See the\n   paragraph on "__hash__()" for some important notes on creating\n   *hashable* objects which support custom comparison operations and\n   are usable as dictionary keys.\n\n   There are no swapped-argument versions of these methods (to be used\n   when the left argument does not support the operation but the right\n   argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n   reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n   and "__eq__()" and "__ne__()" are their own reflection.\n\n   Arguments to rich comparison methods are never coerced.\n\n   To automatically generate ordering operations from a single root\n   operation, see "functools.total_ordering()".\n\nobject.__cmp__(self, other)\n\n   Called by comparison operations if rich comparison (see above) is\n   not defined.  Should return a negative integer if "self < other",\n   zero if "self == other", a positive integer if "self > other".  If\n   no "__cmp__()", "__eq__()" or "__ne__()" operation is defined,\n   class instances are compared by object identity ("address").  See\n   also the description of "__hash__()" for some important notes on\n   creating *hashable* objects which support custom comparison\n   operations and are usable as dictionary keys. 
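A minimal sketch of the rich comparison advice above, using a hypothetical "Version" class: "__ne__()" is defined alongside "__eq__()", and "NotImplemented" is returned for operand types the method does not handle.

   >>> class Version(object):
   ...     def __init__(self, major, minor):
   ...         self.major, self.minor = major, minor
   ...     def __eq__(self, other):
   ...         if not isinstance(other, Version):
   ...             return NotImplemented      # let the other operand try
   ...         return (self.major, self.minor) == (other.major, other.minor)
   ...     def __ne__(self, other):
   ...         result = self.__eq__(other)
   ...         return result if result is NotImplemented else not result
   ...
   >>> Version(1, 2) == Version(1, 2)
   True
   >>> Version(1, 2) != Version(1, 3)
   True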
(Note: the\n   restriction that exceptions are not propagated by "__cmp__()" has\n   been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n   Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n   Called by built-in function "hash()" and for operations on members\n   of hashed collections including "set", "frozenset", and "dict".\n   "__hash__()" should return an integer.  The only required property\n   is that objects which compare equal have the same hash value; it is\n   advised to somehow mix together (e.g. using exclusive or) the hash\n   values for the components of the object that also play a part in\n   comparison of objects.\n\n   If a class does not define a "__cmp__()" or "__eq__()" method it\n   should not define a "__hash__()" operation either; if it defines\n   "__cmp__()" or "__eq__()" but not "__hash__()", its instances will\n   not be usable in hashed collections.  If a class defines mutable\n   objects and implements a "__cmp__()" or "__eq__()" method, it\n   should not implement "__hash__()", since hashable collection\n   implementations require that an object\'s hash value is immutable (if\n   the object\'s hash value changes, it will be in the wrong hash\n   bucket).\n\n   User-defined classes have "__cmp__()" and "__hash__()" methods by\n   default; with them, all objects compare unequal (except with\n   themselves) and "x.__hash__()" returns a result derived from\n   "id(x)".\n\n   Classes which inherit a "__hash__()" method from a parent class but\n   change the meaning of "__cmp__()" or "__eq__()" such that the hash\n   value returned is no longer appropriate (e.g. by switching to a\n   value-based concept of equality instead of the default identity\n   based equality) can explicitly flag themselves as being unhashable\n   by setting "__hash__ = None" in the class definition. Doing so\n   means that not only will instances of the class raise an\n   appropriate "TypeError" when a program attempts to retrieve their\n   hash value, but they will also be correctly identified as\n   unhashable when checking "isinstance(obj, collections.Hashable)"\n   (unlike classes which define their own "__hash__()" to explicitly\n   raise "TypeError").\n\n   Changed in version 2.5: "__hash__()" may now also return a long\n   integer object; the 32-bit integer is then derived from the hash of\n   that object.\n\n   Changed in version 2.6: "__hash__" may now be set to "None" to\n   explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n   Called to implement truth value testing and the built-in operation\n   "bool()"; should return "False" or "True", or their integer\n   equivalents "0" or "1".  When this method is not defined,\n   "__len__()" is called, if it is defined, and the object is\n   considered true if its result is nonzero. If a class defines\n   neither "__len__()" nor "__nonzero__()", all its instances are\n   considered true.\n\nobject.__unicode__(self)\n\n   Called to implement "unicode()" built-in; should return a Unicode\n   object. 
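As an illustrative sketch of the "__hash__()" guidance above (the "Color" class is hypothetical), keeping "__hash__()" consistent with "__eq__()" makes instances behave as expected in hashed collections; "__ne__()" is omitted here only for brevity.

   >>> class Color(object):
   ...     def __init__(self, name):
   ...         self.name = name
   ...     def __eq__(self, other):
   ...         return isinstance(other, Color) and self.name == other.name
   ...     def __hash__(self):
   ...         return hash(self.name)         # equal objects hash equal
   ...
   >>> len({Color('red'), Color('red'), Color('blue')})
   2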
When this method is not defined, string conversion is\n   attempted, and the result of string conversion is converted to\n   Unicode using the system default encoding.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n   Called when an attribute lookup has not found the attribute in the\n   usual places (i.e. it is not an instance attribute nor is it found\n   in the class tree for "self").  "name" is the attribute name. This\n   method should return the (computed) attribute value or raise an\n   "AttributeError" exception.\n\n   Note that if the attribute is found through the normal mechanism,\n   "__getattr__()" is not called.  (This is an intentional asymmetry\n   between "__getattr__()" and "__setattr__()".) This is done both for\n   efficiency reasons and because otherwise "__getattr__()" would have\n   no way to access other attributes of the instance.  Note that at\n   least for instance variables, you can fake total control by not\n   inserting any values in the instance attribute dictionary (but\n   instead inserting them in another object).  See the\n   "__getattribute__()" method below for a way to actually get total\n   control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n   Called when an attribute assignment is attempted.  This is called\n   instead of the normal mechanism (i.e. store the value in the\n   instance dictionary).  *name* is the attribute name, *value* is the\n   value to be assigned to it.\n\n   If "__setattr__()" wants to assign to an instance attribute, it\n   should not simply execute "self.name = value" --- this would cause\n   a recursive call to itself.  Instead, it should insert the value in\n   the dictionary of instance attributes, e.g., "self.__dict__[name] =\n   value".  For new-style classes, rather than accessing the instance\n   dictionary, it should call the base class method with the same\n   name, for example, "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n   Like "__setattr__()" but for attribute deletion instead of\n   assignment.  This should only be implemented if "del obj.name" is\n   meaningful for the object.\n\n\nMore attribute access for new-style classes\n-------------------------------------------\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n   Called unconditionally to implement attribute accesses for\n   instances of the class. If the class also defines "__getattr__()",\n   the latter will not be called unless "__getattribute__()" either\n   calls it explicitly or raises an "AttributeError". This method\n   should return the (computed) attribute value or raise an\n   "AttributeError" exception. In order to avoid infinite recursion in\n   this method, its implementation should always call the base class\n   method with the same name to access any attributes it needs, for\n   example, "object.__getattribute__(self, name)".\n\n   Note: This method may still be bypassed when looking up special\n     methods as the result of implicit invocation via language syntax\n     or built-in functions. 
See Special method lookup for new-style\n     classes.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents).  In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n   Called to get the attribute of the owner class (class attribute\n   access) or of an instance of that class (instance attribute\n   access). *owner* is always the owner class, while *instance* is the\n   instance that the attribute was accessed through, or "None" when\n   the attribute is accessed through the *owner*.  This method should\n   return the (computed) attribute value or raise an "AttributeError"\n   exception.\n\nobject.__set__(self, instance, value)\n\n   Called to set the attribute on an instance *instance* of the owner\n   class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n   Called to delete the attribute on an instance *instance* of the\n   owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol:  "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead.  Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.  Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass "object()" or "type()").\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n   The simplest and least common call is when user code directly\n   invokes a descriptor method:    "x.__get__(a)".\n\nInstance Binding\n   If binding to a new-style object instance, "a.x" is transformed\n   into the call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n   If binding to a new-style class, "A.x" is transformed into the\n   call: "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n   If "a" is an instance of "super", then the binding "super(B,\n   obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n   immediately preceding "B" and then invokes the descriptor with the\n   call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non which descriptor methods are defined.  A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()".  If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary.  
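A minimal sketch of the descriptor protocol described above, with hypothetical "Celsius" and "Thermometer" classes (not examples from the reference text):

   >>> class Celsius(object):
   ...     def __get__(self, instance, owner):
   ...         if instance is None:
   ...             return self                # accessed on the owner class
   ...         return instance.__dict__.get('_celsius', 0.0)
   ...     def __set__(self, instance, value):
   ...         instance.__dict__['_celsius'] = float(value)
   ...
   >>> class Thermometer(object):
   ...     temperature = Celsius()
   ...
   >>> t = Thermometer()
   >>> t.temperature = 21
   >>> t.temperature
   21.0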
If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor.  Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method.  Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary.  In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors.  Accordingly, instances can\nredefine and override methods.  This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage.  This wastes space for objects\nhaving very few instance variables.  The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition.  The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable.  Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n   This class variable can be assigned a string, iterable, or sequence\n   of strings with variable names used by instances.  If defined in a\n   new-style class, *__slots__* reserves space for the declared\n   variables and prevents the automatic creation of *__dict__* and\n   *__weakref__* for each instance.\n\n   New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n  attribute of that class will always be accessible, so a *__slots__*\n  definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n  variables not listed in the *__slots__* definition.  Attempts to\n  assign to an unlisted variable name raises "AttributeError". If\n  dynamic assignment of new variables is desired, then add\n  "\'__dict__\'" to the sequence of strings in the *__slots__*\n  declaration.\n\n  Changed in version 2.3: Previously, adding "\'__dict__\'" to the\n  *__slots__* declaration would not enable the assignment of new\n  attributes not specifically listed in the sequence of instance\n  variable names.\n\n* Without a *__weakref__* variable for each instance, classes\n  defining *__slots__* do not support weak references to its\n  instances. If weak reference support is needed, then add\n  "\'__weakref__\'" to the sequence of strings in the *__slots__*\n  declaration.\n\n  Changed in version 2.3: Previously, adding "\'__weakref__\'" to the\n  *__slots__* declaration would not enable support for weak\n  references.\n\n* *__slots__* are implemented at the class level by creating\n  descriptors (Implementing Descriptors) for each variable name.  As a\n  result, class attributes cannot be used to set default values for\n  instance variables defined by *__slots__*; otherwise, the class\n  attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n  where it is defined.  
As a result, subclasses will have a *__dict__*\n  unless they also define *__slots__* (which must only contain names\n  of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n  instance variable defined by the base class slot is inaccessible\n  (except by retrieving its descriptor directly from the base class).\n  This renders the meaning of the program undefined.  In the future, a\n  check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n  "variable-length" built-in types such as "long", "str" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n  may also be used; however, in the future, special meaning may be\n  assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n  *__slots__*.\n\n  Changed in version 2.6: Previously, *__class__* assignment raised an\n  error if either new or old class had *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, new-style classes are constructed using "type()". A class\ndefinition is read into a separate namespace and the value of class\nname is bound to the result of "type(name, bases, dict)".\n\nWhen the class definition is read, if *__metaclass__* is defined then\nthe callable assigned to it will be called instead of "type()". This\nallows classes or functions to be written which monitor or alter the\nclass creation process:\n\n* Modifying the class dictionary prior to the class being created.\n\n* Returning an instance of another class -- essentially performing\n  the role of a factory function.\n\nThese steps will have to be performed in the metaclass\'s "__new__()"\nmethod -- "type.__new__()" can then be called from this method to\ncreate a class with different properties.  This example adds a new\nelement to the class dictionary before creating the class:\n\n   class metacls(type):\n       def __new__(mcs, name, bases, dict):\n           dict[\'foo\'] = \'metacls was here\'\n           return type.__new__(mcs, name, bases, dict)\n\nYou can of course also override other class methods (or add new\nmethods); for example defining a custom "__call__()" method in the\nmetaclass allows custom behavior when the class is called, e.g. not\nalways creating a new instance.\n\n__metaclass__\n\n   This variable can be any callable accepting arguments for "name",\n   "bases", and "dict".  Upon class creation, the callable is used\n   instead of the built-in "type()".\n\n   New in version 2.2.\n\nThe appropriate metaclass is determined by the following precedence\nrules:\n\n* If "dict[\'__metaclass__\']" exists, it is used.\n\n* Otherwise, if there is at least one base class, its metaclass is\n  used (this looks for a *__class__* attribute first and if not found,\n  uses its type).\n\n* Otherwise, if a global variable named __metaclass__ exists, it is\n  used.\n\n* Otherwise, the old-style, classic metaclass (types.ClassType) is\n  used.\n\nThe potential uses for metaclasses are boundless. 
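The "metacls" example above can be exercised interactively; the "Widget" class below is a hypothetical user of it, not part of the reference text.

   >>> class metacls(type):
   ...     def __new__(mcs, name, bases, dict):
   ...         dict['foo'] = 'metacls was here'
   ...         return type.__new__(mcs, name, bases, dict)
   ...
   >>> class Widget(object):
   ...     __metaclass__ = metacls            # picked up via dict['__metaclass__']
   ...
   >>> Widget.foo
   'metacls was here'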
Some ideas that have\nbeen explored including logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\n\nCustomizing instance and subclass checks\n========================================\n\nNew in version 2.6.\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n   Return true if *instance* should be considered a (direct or\n   indirect) instance of *class*. If defined, called to implement\n   "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n   Return true if *subclass* should be considered a (direct or\n   indirect) subclass of *class*.  If defined, called to implement\n   "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass.  They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n  **PEP 3119** - Introducing Abstract Base Classes\n     Includes the specification for customizing "isinstance()" and\n     "issubclass()" behavior through "__instancecheck__()" and\n     "__subclasscheck__()", with motivation for this functionality in\n     the context of adding Abstract Base Classes (see the "abc"\n     module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n   Called when the instance is "called" as a function; if this method\n   is defined, "x(arg1, arg2, ...)" is a shorthand for\n   "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well.  The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. (For backwards compatibility, the method\n"__getslice__()" (see below) can also be defined to handle simple, but\nnot extended slices.) It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "has_key()", "get()",\n"clear()", "setdefault()", "iterkeys()", "itervalues()",\n"iteritems()", "pop()", "popitem()", "copy()", and "update()" behaving\nsimilar to those for Python\'s standard dictionary objects.  The\n"UserDict" module provides a "DictMixin" class to help create those\nmethods from a base set of "__getitem__()", "__setitem__()",\n"__delitem__()", and "keys()". Mutable sequences should provide\nmethods "append()", "count()", "index()", "extend()", "insert()",\n"pop()", "remove()", "reverse()" and "sort()", like Python standard\nlist objects.  
Finally, sequence types should implement addition\n(meaning concatenation) and multiplication (meaning repetition) by\ndefining the methods "__add__()", "__radd__()", "__iadd__()",\n"__mul__()", "__rmul__()" and "__imul__()" described below; they\nshould not define "__coerce__()" or other numerical operators.  It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should be equivalent of "has_key()"; for sequences,\nit should search through the values.  It is further recommended that\nboth mappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "iterkeys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n   Called to implement the built-in function "len()".  Should return\n   the length of the object, an integer ">=" 0.  Also, an object that\n   doesn\'t define a "__nonzero__()" method and whose "__len__()"\n   method returns zero is considered to be false in a Boolean context.\n\nobject.__getitem__(self, key)\n\n   Called to implement evaluation of "self[key]". For sequence types,\n   the accepted keys should be integers and slice objects.  Note that\n   the special interpretation of negative indexes (if the class wishes\n   to emulate a sequence type) is up to the "__getitem__()" method. If\n   *key* is of an inappropriate type, "TypeError" may be raised; if of\n   a value outside the set of indexes for the sequence (after any\n   special interpretation of negative values), "IndexError" should be\n   raised. For mapping types, if *key* is missing (not in the\n   container), "KeyError" should be raised.\n\n   Note: "for" loops expect that an "IndexError" will be raised for\n     illegal indexes to allow proper detection of the end of the\n     sequence.\n\nobject.__missing__(self, key)\n\n   Called by "dict"."__getitem__()" to implement "self[key]" for dict\n   subclasses when key is not in the dictionary.\n\nobject.__setitem__(self, key, value)\n\n   Called to implement assignment to "self[key]".  Same note as for\n   "__getitem__()".  This should only be implemented for mappings if\n   the objects support changes to the values for keys, or if new keys\n   can be added, or for sequences if elements can be replaced.  The\n   same exceptions should be raised for improper *key* values as for\n   the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n   Called to implement deletion of "self[key]".  Same note as for\n   "__getitem__()".  This should only be implemented for mappings if\n   the objects support removal of keys, or for sequences if elements\n   can be removed from the sequence.  The same exceptions should be\n   raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n   This method is called when an iterator is required for a container.\n   This method should return a new iterator object that can iterate\n   over all the objects in the container.  For mappings, it should\n   iterate over the keys of the container, and should also be made\n   available as the method "iterkeys()".\n\n   Iterator objects also need to implement this method; they are\n   required to return themselves.  For more information on iterator\n   objects, see Iterator Types.\n\nobject.__reversed__(self)\n\n   Called (if present) by the "reversed()" built-in to implement\n   reverse iteration.  
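A minimal sketch of sequence emulation with just "__len__()" and "__getitem__()" as described above (the "Countdown" class is hypothetical); iteration and membership tests fall back to the old "__getitem__()" protocol.

   >>> class Countdown(object):
   ...     def __init__(self, n):
   ...         self.n = n
   ...     def __len__(self):
   ...         return self.n
   ...     def __getitem__(self, index):
   ...         # only plain, non-negative indexes are handled in this sketch
   ...         if not 0 <= index < self.n:
   ...             raise IndexError(index)    # lets "for" detect the end
   ...         return self.n - index
   ...
   >>> list(Countdown(3))
   [3, 2, 1]
   >>> 2 in Countdown(3)
   True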
It should return a new iterator object that\n   iterates over all the objects in the container in reverse order.\n\n   If the "__reversed__()" method is not provided, the "reversed()"\n   built-in will fall back to using the sequence protocol ("__len__()"\n   and "__getitem__()").  Objects that support the sequence protocol\n   should only provide "__reversed__()" if they can provide an\n   implementation that is more efficient than the one provided by\n   "reversed()".\n\n   New in version 2.6.\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence.  However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n   Called to implement membership test operators.  Should return true\n   if *item* is in *self*, false otherwise.  For mapping objects, this\n   should consider the keys of the mapping rather than the values or\n   the key-item pairs.\n\n   For objects that don\'t define "__contains__()", the membership test\n   first tries iteration via "__iter__()", then the old sequence\n   iteration protocol via "__getitem__()", see this section in the\n   language reference.\n\n\nAdditional methods for emulation of sequence types\n==================================================\n\nThe following optional methods can be defined to further emulate\nsequence objects.  Immutable sequences methods should at most only\ndefine "__getslice__()"; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n   Deprecated since version 2.0: Support slice objects as parameters\n   to the "__getitem__()" method. (However, built-in types in CPython\n   currently still implement "__getslice__()".  Therefore, you have to\n   override it in derived classes when implementing slicing.)\n\n   Called to implement evaluation of "self[i:j]". The returned object\n   should be of the same type as *self*.  Note that missing *i* or *j*\n   in the slice expression are replaced by zero or "sys.maxsize",\n   respectively.  If negative indexes are used in the slice, the\n   length of the sequence is added to that index. If the instance does\n   not implement the "__len__()" method, an "AttributeError" is\n   raised. No guarantee is made that indexes adjusted this way are not\n   still negative.  Indexes which are greater than the length of the\n   sequence are not modified. If no "__getslice__()" is found, a slice\n   object is created instead, and passed to "__getitem__()" instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n   Called to implement assignment to "self[i:j]". Same notes for *i*\n   and *j* as for "__getslice__()".\n\n   This method is deprecated. If no "__setslice__()" is found, or for\n   extended slicing of the form "self[i:j:k]", a slice object is\n   created, and passed to "__setitem__()", instead of "__setslice__()"\n   being called.\n\nobject.__delslice__(self, i, j)\n\n   Called to implement deletion of "self[i:j]". Same notes for *i* and\n   *j* as for "__getslice__()". This method is deprecated. If no\n   "__delslice__()" is found, or for extended slicing of the form\n   "self[i:j:k]", a slice object is created, and passed to\n   "__delitem__()", instead of "__delslice__()" being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available.  
For slice\noperations involving extended slice notation, or in absence of the\nslice methods, "__getitem__()", "__setitem__()" or "__delitem__()" is\ncalled with a slice object as argument.\n\nThe following example demonstrate how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n"__getitem__()", "__setitem__()" and "__delitem__()" support slice\nobjects as arguments):\n\n   class MyClass:\n       ...\n       def __getitem__(self, index):\n           ...\n       def __setitem__(self, index, value):\n           ...\n       def __delitem__(self, index):\n           ...\n\n       if sys.version_info < (2, 0):\n           # They won\'t be defined if version is at least 2.0 final\n\n           def __getslice__(self, i, j):\n               return self[max(0, i):max(0, j):]\n           def __setslice__(self, i, j, seq):\n               self[max(0, i):max(0, j):] = seq\n           def __delslice__(self, i, j):\n               del self[max(0, i):max(0, j):]\n       ...\n\nNote the calls to "max()"; these are necessary because of the handling\nof negative indices before the "__*slice__()" methods are called.\nWhen negative indexes are used, the "__*item__()" methods receive them\nas provided, but the "__*slice__()" methods get a "cooked" form of the\nindex values.  For each negative index value, the length of the\nsequence is added to the index before calling the method (which may\nstill result in a negative index); this is the customary handling of\nnegative indexes by the built-in sequence types, and the "__*item__()"\nmethods are expected to do this as well.  However, since they should\nalready be doing that, negative indexes cannot be passed in; they must\nbe constrained to the bounds of the sequence before being passed to\nthe "__*item__()" methods. Calling "max(0, i)" conveniently returns\nthe proper value.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n   These methods are called to implement the binary arithmetic\n   operations ("+", "-", "*", "//", "%", "divmod()", "pow()", "**",\n   "<<", ">>", "&", "^", "|").  For instance, to evaluate the\n   expression "x + y", where *x* is an instance of a class that has an\n   "__add__()" method, "x.__add__(y)" is called.  The "__divmod__()"\n   method should be the equivalent to using "__floordiv__()" and\n   "__mod__()"; it should not be related to "__truediv__()" (described\n   below).  Note that "__pow__()" should be defined to accept an\n   optional third argument if the ternary version of the built-in\n   "pow()" function is to be supported.\n\n   If one of those methods does not support the operation with the\n   supplied arguments, it should return "NotImplemented".\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n   The division operator ("/") is implemented by these methods.  
The\n   "__truediv__()" method is used when "__future__.division" is in\n   effect, otherwise "__div__()" is used.  If only one of these two\n   methods is defined, the object will not support division in the\n   alternate context; "TypeError" will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n   These methods are called to implement the binary arithmetic\n   operations ("+", "-", "*", "/", "%", "divmod()", "pow()", "**",\n   "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n   These functions are only called if the left operand does not\n   support the corresponding operation and the operands are of\n   different types. [2] For instance, to evaluate the expression "x -\n   y", where *y* is an instance of a class that has an "__rsub__()"\n   method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n   *NotImplemented*.\n\n   Note that ternary "pow()" will not try calling "__rpow__()" (the\n   coercion rules would become too complicated).\n\n   Note: If the right operand\'s type is a subclass of the left\n     operand\'s type and that subclass provides the reflected method\n     for the operation, this method will be called before the left\n     operand\'s non-reflected method.  This behavior allows subclasses\n     to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n   These methods are called to implement the augmented arithmetic\n   assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n   ">>=", "&=", "^=", "|=").  These methods should attempt to do the\n   operation in-place (modifying *self*) and return the result (which\n   could be, but does not have to be, *self*).  If a specific method\n   is not defined, the augmented assignment falls back to the normal\n   methods.  For instance, to execute the statement "x += y", where\n   *x* is an instance of a class that has an "__iadd__()" method,\n   "x.__iadd__(y)" is called.  If *x* is an instance of a class that\n   does not define a "__iadd__()" method, "x.__add__(y)" and\n   "y.__radd__(x)" are considered, as with the evaluation of "x + y".\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n   Called to implement the unary arithmetic operations ("-", "+",\n   "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n   Called to implement the built-in functions "complex()", "int()",\n   "long()", and "float()".  Should return a value of the appropriate\n   type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n   Called to implement the built-in functions "oct()" and "hex()".\n   Should return a string value.\n\nobject.__index__(self)\n\n   Called to implement "operator.index()".  
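A minimal sketch of the augmented assignment methods above (the "Tally" class is hypothetical): "__iadd__()" mutates the instance and returns it, so "t += n" rebinds "t" to the same object.

   >>> class Tally(object):
   ...     def __init__(self):
   ...         self.total = 0
   ...     def __iadd__(self, amount):
   ...         self.total += amount           # modify in place...
   ...         return self                    # ...and return the result
   ...
   >>> t = Tally()
   >>> t += 5
   >>> t += 2
   >>> t.total
   7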
Also called whenever\n   Python needs an integer object (such as in slicing).  Must return\n   an integer (int or long).\n\n   New in version 2.5.\n\nobject.__coerce__(self, other)\n\n   Called to implement "mixed-mode" numeric arithmetic.  Should either\n   return a 2-tuple containing *self* and *other* converted to a\n   common numeric type, or "None" if conversion is impossible.  When\n   the common type would be the type of "other", it is sufficient to\n   return "None", since the interpreter will also ask the other object\n   to attempt a coercion (but sometimes, if the implementation of the\n   other type cannot be changed, it is useful to do the conversion to\n   the other type here).  A return value of "NotImplemented" is\n   equivalent to returning "None".\n\n\nCoercion rules\n==============\n\nThis section used to document the rules for coercion.  As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable.  Instead, here are some informal\nguidelines regarding coercion.  In Python 3, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n  no coercion takes place and the string formatting operation is\n  invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n  mode operations on types that don\'t define coercion pass the\n  original arguments to the operation.\n\n* New-style classes (those derived from "object") never invoke the\n  "__coerce__()" method in response to a binary operator; the only\n  time "__coerce__()" is invoked is when the built-in function\n  "coerce()" is called.\n\n* For most intents and purposes, an operator that returns\n  "NotImplemented" is treated the same as one that is not implemented\n  at all.\n\n* Below, "__op__()" and "__rop__()" are used to signify the generic\n  method names corresponding to an operator; "__iop__()" is used for\n  the corresponding in-place operator.  For example, for the operator\n  \'"+"\', "__add__()" and "__radd__()" are used for the left and right\n  variant of the binary operator, and "__iadd__()" for the in-place\n  variant.\n\n* For objects *x* and *y*, first "x.__op__(y)" is tried.  If this is\n  not implemented or returns "NotImplemented", "y.__rop__(x)" is\n  tried.  If this is also not implemented or returns "NotImplemented",\n  a "TypeError" exception is raised.  But see the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n  of a built-in type or a new-style class, and the right operand is an\n  instance of a proper subclass of that type or class and overrides\n  the base\'s "__rop__()" method, the right operand\'s "__rop__()"\n  method is tried *before* the left operand\'s "__op__()" method.\n\n  This is done so that a subclass can completely override binary\n  operators. Otherwise, the left operand\'s "__op__()" method would\n  always accept the right operand: when an instance of a given class\n  is expected, an instance of a subclass of that class is always\n  acceptable.\n\n* When either operand type defines a coercion, this coercion is\n  called before that type\'s "__op__()" or "__rop__()" method is\n  called, but no sooner.  
If the coercion returns an object of a\n  different type for the operand whose coercion is invoked, part of\n  the process is redone using the new object.\n\n* When an in-place operator (like \'"+="\') is used, if the left\n  operand implements "__iop__()", it is invoked without any coercion.\n  When the operation falls back to "__op__()" and/or "__rop__()", the\n  normal coercion rules apply.\n\n* In "x + y", if *x* is a sequence that implements sequence\n  concatenation, sequence concatenation is invoked.\n\n* In "x * y", if one operand is a sequence that implements sequence\n  repetition, and the other is an integer ("int" or "long"), sequence\n  repetition is invoked.\n\n* Rich comparisons (implemented by methods "__eq__()" and so on)\n  never use coercion.  Three-way comparison (implemented by\n  "__cmp__()") does use coercion under the same conditions as other\n  binary operations use it.\n\n* In the current implementation, the built-in numeric types "int",\n  "long", "float", and "complex" do not use coercion. All these types\n  implement a "__coerce__()" method, for use by the built-in\n  "coerce()" function.\n\n  Changed in version 2.7: The complex type no longer makes implicit\n  calls to the "__coerce__()" method for mixed-type binary arithmetic\n  operations.\n\n\nWith Statement Context Managers\n===============================\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code.  Context managers are normally\ninvoked using the "with" statement (described in section The with\nstatement), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see Context Manager Types.\n\nobject.__enter__(self)\n\n   Enter the runtime context related to this object. The "with"\n   statement will bind this method\'s return value to the target(s)\n   specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n   Exit the runtime context related to this object. The parameters\n   describe the exception that caused the context to be exited. If the\n   context was exited without an exception, all three arguments will\n   be "None".\n\n   If an exception is supplied, and the method wishes to suppress the\n   exception (i.e., prevent it from being propagated), it should\n   return a true value. Otherwise, the exception will be processed\n   normally upon exit from this method.\n\n   Note that "__exit__()" methods should not reraise the passed-in\n   exception; this is the caller\'s responsibility.\n\nSee also:\n\n  **PEP 0343** - The "with" statement\n     The specification, background, and examples for the Python "with"\n     statement.\n\n\nSpecial method lookup for old-style classes\n===========================================\n\nFor old-style classes, special methods are always looked up in exactly\nthe same way as any other method or attribute. 
This is the case\nregardless of whether the method is being looked up explicitly as in\n"x.__getitem__(i)" or implicitly as in "x[i]".\n\nThis behaviour means that special methods may exhibit different\nbehaviour for different instances of a single old-style class if the\nappropriate special attributes are set differently:\n\n   >>> class C:\n   ...     pass\n   ...\n   >>> c1 = C()\n   >>> c2 = C()\n   >>> c1.__len__ = lambda: 5\n   >>> c2.__len__ = lambda: 9\n   >>> len(c1)\n   5\n   >>> len(c2)\n   9\n\n\nSpecial method lookup for new-style classes\n===========================================\n\nFor new-style classes, implicit invocations of special methods are\nonly guaranteed to work correctly if defined on an object\'s type, not\nin the object\'s instance dictionary.  That behaviour is the reason why\nthe following code raises an exception (unlike the equivalent example\nwith old-style classes):\n\n   >>> class C(object):\n   ...     pass\n   ...\n   >>> c = C()\n   >>> c.__len__ = lambda: 5\n   >>> len(c)\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in <module>\n   TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n   >>> 1 .__hash__() == hash(1)\n   True\n   >>> int.__hash__() == hash(int)\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in <module>\n   TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n   >>> type(1).__hash__(1) == hash(1)\n   True\n   >>> type(int).__hash__(int) == hash(int)\n   True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n   >>> class Meta(type):\n   ...    def __getattribute__(*args):\n   ...       print "Metaclass getattribute invoked"\n   ...       return type.__getattribute__(*args)\n   ...\n   >>> class C(object):\n   ...     __metaclass__ = Meta\n   ...     def __len__(self):\n   ...         return 10\n   ...     def __getattribute__(*args):\n   ...         print "Class getattribute invoked"\n   ...         return object.__getattribute__(*args)\n   ...\n   >>> c = C()\n   >>> c.__len__()                 # Explicit lookup via instance\n   Class getattribute invoked\n   10\n   >>> type(c).__len__(c)          # Explicit lookup via type\n   Metaclass getattribute invoked\n   10\n   >>> len(c)                      # Implicit lookup\n   10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type,\n    under certain controlled conditions. 
It generally isn\'t a good\n    idea though, since it can lead to some very strange behaviour if\n    it is handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n    reflected method (such as "__add__()") fails the operation is not\n    supported, which is why the reflected method is not called.\n',
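As an illustrative sketch of the reflected methods and footnote [2] (the "Seconds" class is hypothetical): because "int.__add__()" returns "NotImplemented" for an unknown right operand, Python falls back to the right operand's "__radd__()".

   >>> class Seconds(object):
   ...     def __init__(self, n):
   ...         self.n = n
   ...     def __radd__(self, other):
   ...         return other + self.n          # called because int's __add__ declines
   ...
   >>> 5 + Seconds(3)
   8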
+ 'string-methods': u'\nString Methods\n**************\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support.  Some of them are also available on\n"bytearray" objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange section. To output formatted strings use\ntemplate strings or the "%" operator described in the String\nFormatting Operations section. Also, see the "re" module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n   Return a copy of the string with its first character capitalized\n   and the rest lowercased.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n   Return centered in a string of length *width*. Padding is done\n   using the specified *fillchar* (default is a space).\n\n   Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n   Return the number of non-overlapping occurrences of substring *sub*\n   in the range [*start*, *end*].  Optional arguments *start* and\n   *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n   Decodes the string using the codec registered for *encoding*.\n   *encoding* defaults to the default string encoding.  *errors* may\n   be given to set a different error handling scheme.  The default is\n   "\'strict\'", meaning that encoding errors raise "UnicodeError".\n   Other possible values are "\'ignore\'", "\'replace\'" and any other\n   name registered via "codecs.register_error()", see section Codec\n   Base Classes.\n\n   New in version 2.2.\n\n   Changed in version 2.3: Support for other error handling schemes\n   added.\n\n   Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n   Return an encoded version of the string.  Default encoding is the\n   current default string encoding.  *errors* may be given to set a\n   different error handling scheme.  The default for *errors* is\n   "\'strict\'", meaning that encoding errors raise a "UnicodeError".\n   Other possible values are "\'ignore\'", "\'replace\'",\n   "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any other name\n   registered via "codecs.register_error()", see section Codec Base\n   Classes. For a list of possible encodings, see section Standard\n   Encodings.\n\n   New in version 2.0.\n\n   Changed in version 2.3: Support for "\'xmlcharrefreplace\'" and\n   "\'backslashreplace\'" and other error handling schemes added.\n\n   Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n   Return "True" if the string ends with the specified *suffix*,\n   otherwise return "False".  *suffix* can also be a tuple of suffixes\n   to look for.  With optional *start*, test beginning at that\n   position.  With optional *end*, stop comparing at that position.\n\n   Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n   Return a copy of the string where all tab characters are replaced\n   by one or more spaces, depending on the current column and the\n   given tab size.  Tab positions occur every *tabsize* characters\n   (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n   To expand the string, the current column is set to zero and the\n   string is examined character by character.  
If the character is a\n   tab ("\\t"), one or more space characters are inserted in the result\n   until the current column is equal to the next tab position. (The\n   tab character itself is not copied.)  If the character is a newline\n   ("\\n") or return ("\\r"), it is copied and the current column is\n   reset to zero.  Any other character is copied unchanged and the\n   current column is incremented by one regardless of how the\n   character is represented when printed.\n\n   >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n   \'01      012     0123    01234\'\n   >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n   \'01  012 0123    01234\'\n\nstr.find(sub[, start[, end]])\n\n   Return the lowest index in the string where substring *sub* is\n   found within the slice "s[start:end]".  Optional arguments *start*\n   and *end* are interpreted as in slice notation.  Return "-1" if\n   *sub* is not found.\n\n   Note: The "find()" method should be used only if you need to know\n     the position of *sub*.  To check if *sub* is a substring or not,\n     use the "in" operator:\n\n        >>> \'Py\' in \'Python\'\n        True\n\nstr.format(*args, **kwargs)\n\n   Perform a string formatting operation.  The string on which this\n   method is called can contain literal text or replacement fields\n   delimited by braces "{}".  Each replacement field contains either\n   the numeric index of a positional argument, or the name of a\n   keyword argument.  Returns a copy of the string where each\n   replacement field is replaced with the string value of the\n   corresponding argument.\n\n   >>> "The sum of 1 + 2 is {0}".format(1+2)\n   \'The sum of 1 + 2 is 3\'\n\n   See Format String Syntax for a description of the various\n   formatting options that can be specified in format strings.\n\n   This method of string formatting is the new standard in Python 3,\n   and should be preferred to the "%" formatting described in String\n   Formatting Operations in new code.\n\n   New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n   Like "find()", but raise "ValueError" when the substring is not\n   found.\n\nstr.isalnum()\n\n   Return true if all characters in the string are alphanumeric and\n   there is at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n   Return true if all characters in the string are alphabetic and\n   there is at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n   Return true if all characters in the string are digits and there is\n   at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n   Return true if all cased characters [4] in the string are lowercase\n   and there is at least one cased character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n   Return true if there are only whitespace characters in the string\n   and there is at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n   Return true if the string is a titlecased string and there is at\n   least one character, for example uppercase characters may only\n   follow uncased characters and lowercase characters only cased ones.\n   Return false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n   Return true if all cased characters [4] in the string are uppercase\n   and there is 
at least one cased character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n   Return a string which is the concatenation of the strings in the\n   *iterable* *iterable*.  The separator between elements is the\n   string providing this method.\n\nstr.ljust(width[, fillchar])\n\n   Return the string left justified in a string of length *width*.\n   Padding is done using the specified *fillchar* (default is a\n   space).  The original string is returned if *width* is less than or\n   equal to "len(s)".\n\n   Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n   Return a copy of the string with all the cased characters [4]\n   converted to lowercase.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n   Return a copy of the string with leading characters removed.  The\n   *chars* argument is a string specifying the set of characters to be\n   removed.  If omitted or "None", the *chars* argument defaults to\n   removing whitespace.  The *chars* argument is not a prefix; rather,\n   all combinations of its values are stripped:\n\n   >>> \'   spacious   \'.lstrip()\n   \'spacious   \'\n   >>> \'www.example.com\'.lstrip(\'cmowz.\')\n   \'example.com\'\n\n   Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n   Split the string at the first occurrence of *sep*, and return a\n   3-tuple containing the part before the separator, the separator\n   itself, and the part after the separator.  If the separator is not\n   found, return a 3-tuple containing the string itself, followed by\n   two empty strings.\n\n   New in version 2.5.\n\nstr.replace(old, new[, count])\n\n   Return a copy of the string with all occurrences of substring *old*\n   replaced by *new*.  If the optional argument *count* is given, only\n   the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n   Return the highest index in the string where substring *sub* is\n   found, such that *sub* is contained within "s[start:end]".\n   Optional arguments *start* and *end* are interpreted as in slice\n   notation.  Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n   Like "rfind()" but raises "ValueError" when the substring *sub* is\n   not found.\n\nstr.rjust(width[, fillchar])\n\n   Return the string right justified in a string of length *width*.\n   Padding is done using the specified *fillchar* (default is a\n   space). The original string is returned if *width* is less than or\n   equal to "len(s)".\n\n   Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n   Split the string at the last occurrence of *sep*, and return a\n   3-tuple containing the part before the separator, the separator\n   itself, and the part after the separator.  If the separator is not\n   found, return a 3-tuple containing two empty strings, followed by\n   the string itself.\n\n   New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n   Return a list of the words in the string, using *sep* as the\n   delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n   are done, the *rightmost* ones.  If *sep* is not specified or\n   "None", any whitespace string is a separator.  Except for splitting\n   from the right, "rsplit()" behaves like "split()" which is\n   described in detail below.\n\n   New in version 2.4.\n\nstr.rstrip([chars])\n\n   Return a copy of the string with trailing characters removed.  
The\n   *chars* argument is a string specifying the set of characters to be\n   removed.  If omitted or "None", the *chars* argument defaults to\n   removing whitespace.  The *chars* argument is not a suffix; rather,\n   all combinations of its values are stripped:\n\n   >>> \'   spacious   \'.rstrip()\n   \'   spacious\'\n   >>> \'mississippi\'.rstrip(\'ipz\')\n   \'mississ\'\n\n   Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n   Return a list of the words in the string, using *sep* as the\n   delimiter string.  If *maxsplit* is given, at most *maxsplit*\n   splits are done (thus, the list will have at most "maxsplit+1"\n   elements).  If *maxsplit* is not specified or "-1", then there is\n   no limit on the number of splits (all possible splits are made).\n\n   If *sep* is given, consecutive delimiters are not grouped together\n   and are deemed to delimit empty strings (for example,\n   "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']").  The *sep* argument\n   may consist of multiple characters (for example,\n   "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n   empty string with a specified separator returns "[\'\']".\n\n   If *sep* is not specified or is "None", a different splitting\n   algorithm is applied: runs of consecutive whitespace are regarded\n   as a single separator, and the result will contain no empty strings\n   at the start or end if the string has leading or trailing\n   whitespace.  Consequently, splitting an empty string or a string\n   consisting of just whitespace with a "None" separator returns "[]".\n\n   For example, "\' 1  2   3  \'.split()" returns "[\'1\', \'2\', \'3\']", and\n   "\'  1  2   3  \'.split(None, 1)" returns "[\'1\', \'2   3  \']".\n\nstr.splitlines([keepends])\n\n   Return a list of the lines in the string, breaking at line\n   boundaries. This method uses the *universal newlines* approach to\n   splitting lines. Line breaks are not included in the resulting list\n   unless *keepends* is given and true.\n\n   For example, "\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()" returns "[\'ab\n   c\', \'\', \'de fg\', \'kl\']", while the same call with\n   "splitlines(True)" returns "[\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']".\n\n   Unlike "split()" when a delimiter string *sep* is given, this\n   method returns an empty list for the empty string, and a terminal\n   line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n   Return "True" if string starts with the *prefix*, otherwise return\n   "False". *prefix* can also be a tuple of prefixes to look for.\n   With optional *start*, test string beginning at that position.\n   With optional *end*, stop comparing string at that position.\n\n   Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n   Return a copy of the string with the leading and trailing\n   characters removed. The *chars* argument is a string specifying the\n   set of characters to be removed. If omitted or "None", the *chars*\n   argument defaults to removing whitespace. 
The *chars* argument is\n   not a prefix or suffix; rather, all combinations of its values are\n   stripped:\n\n   >>> \'   spacious   \'.strip()\n   \'spacious\'\n   >>> \'www.example.com\'.strip(\'cmowz.\')\n   \'example\'\n\n   Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n   Return a copy of the string with uppercase characters converted to\n   lowercase and vice versa.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n   Return a titlecased version of the string where words start with an\n   uppercase character and the remaining characters are lowercase.\n\n   The algorithm uses a simple language-independent definition of a\n   word as groups of consecutive letters.  The definition works in\n   many contexts but it means that apostrophes in contractions and\n   possessives form word boundaries, which may not be the desired\n   result:\n\n      >>> "they\'re bill\'s friends from the UK".title()\n      "They\'Re Bill\'S Friends From The Uk"\n\n   A workaround for apostrophes can be constructed using regular\n   expressions:\n\n      >>> import re\n      >>> def titlecase(s):\n      ...     return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n      ...                   lambda mo: mo.group(0)[0].upper() +\n      ...                              mo.group(0)[1:].lower(),\n      ...                   s)\n      ...\n      >>> titlecase("they\'re bill\'s friends.")\n      "They\'re Bill\'s Friends."\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n   Return a copy of the string where all characters occurring in the\n   optional argument *deletechars* are removed, and the remaining\n   characters have been mapped through the given translation table,\n   which must be a string of length 256.\n\n   You can use the "maketrans()" helper function in the "string"\n   module to create a translation table. For string objects, set the\n   *table* argument to "None" for translations that only delete\n   characters:\n\n   >>> \'read this short text\'.translate(None, \'aeiou\')\n   \'rd ths shrt txt\'\n\n   New in version 2.6: Support for a "None" *table* argument.\n\n   For Unicode objects, the "translate()" method does not accept the\n   optional *deletechars* argument.  Instead, it returns a copy of the\n   *s* where all characters have been mapped through the given\n   translation table which must be a mapping of Unicode ordinals to\n   Unicode ordinals, Unicode strings or "None". Unmapped characters\n   are left untouched. Characters mapped to "None" are deleted.  Note,\n   a more flexible approach is to create a custom character mapping\n   codec using the "codecs" module (see "encodings.cp1251" for an\n   example).\n\nstr.upper()\n\n   Return a copy of the string with all the cased characters [4]\n   converted to uppercase.  Note that "str.upper().isupper()" might be\n   "False" if "s" contains uncased characters or if the Unicode\n   category of the resulting character(s) is not "Lu" (Letter,\n   uppercase), but e.g. "Lt" (Letter, titlecase).\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n   Return the numeric string left filled with zeros in a string of\n   length *width*.  A sign prefix is handled correctly.  
The original\n   string is returned if *width* is less than or equal to "len(s)".\n\n   New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n   Return "True" if there are only numeric characters in S, "False"\n   otherwise. Numeric characters include digit characters, and all\n   characters that have the Unicode numeric value property, e.g.\n   U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n   Return "True" if there are only decimal characters in S, "False"\n   otherwise. Decimal characters include digit characters, and all\n   characters that can be used to form decimal-radix numbers, e.g.\n   U+0660, ARABIC-INDIC DIGIT ZERO.\n',
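A short doctest-style tour of a few of the methods listed above (the example strings are arbitrary):

   >>> s = '  Hello, World  '
   >>> s.strip()
   'Hello, World'
   >>> s.strip().lower().split(', ')
   ['hello', 'world']
   >>> '-'.join(['a', 'b', 'c'])
   'a-b-c'
   >>> '{0} + {1} = {2}'.format(1, 2, 1 + 2)
   '1 + 2 = 3'
   >>> 'report.txt'.endswith(('.txt', '.csv'))
   True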
  'strings': u'\nString literals\n***************\n\nString literals are described by the following lexical definitions:\n\n   stringliteral   ::= [stringprefix](shortstring | longstring)\n   stringprefix    ::= "r" | "u" | "ur" | "R" | "U" | "UR" | "Ur" | "uR"\n                    | "b" | "B" | "br" | "Br" | "bR" | "BR"\n   shortstring     ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n   longstring      ::= "\'\'\'" longstringitem* "\'\'\'"\n                  | \'"""\' longstringitem* \'"""\'\n   shortstringitem ::= shortstringchar | escapeseq\n   longstringitem  ::= longstringchar | escapeseq\n   shortstringchar ::= <any source character except "\\" or newline or the quote>\n   longstringchar  ::= <any source character except "\\">\n   escapeseq       ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the "stringprefix" and the rest of\nthe string literal. The source character set is defined by the\nencoding declaration; it is ASCII if no encoding declaration is given\nin the source file; see section Encoding declarations.\n\nIn plain English: String literals can be enclosed in matching single\nquotes ("\'") or double quotes (""").  They can also be enclosed in\nmatching groups of three single or double quotes (these are generally\nreferred to as *triple-quoted strings*).  The backslash ("\\")\ncharacter is used to escape characters that otherwise have a special\nmeaning, such as newline, backslash itself, or the quote character.\nString literals may optionally be prefixed with a letter "\'r\'" or\n"\'R\'"; such strings are called *raw strings* and use different rules\nfor interpreting backslash escape sequences.  A prefix of "\'u\'" or\n"\'U\'" makes the string a Unicode string.  Unicode strings use the\nUnicode character set as defined by the Unicode Consortium and ISO\n10646.  Some additional escape sequences, described below, are\navailable in Unicode strings. A prefix of "\'b\'" or "\'B\'" is ignored in\nPython 2; it indicates that the literal should become a bytes literal\nin Python 3 (e.g. when code is automatically converted with 2to3).  A\n"\'u\'" or "\'b\'" prefix may be followed by an "\'r\'" prefix.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string.  (A "quote" is the character used to open the\nstring, i.e. either "\'" or """.)\n\nUnless an "\'r\'" or "\'R\'" prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C.  
The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence   | Meaning                           | Notes   |\n+===================+===================================+=========+\n| "\\newline"        | Ignored                           |         |\n+-------------------+-----------------------------------+---------+\n| "\\\\"              | Backslash ("\\")                   |         |\n+-------------------+-----------------------------------+---------+\n| "\\\'"              | Single quote ("\'")                |         |\n+-------------------+-----------------------------------+---------+\n| "\\""              | Double quote (""")                |         |\n+-------------------+-----------------------------------+---------+\n| "\\a"              | ASCII Bell (BEL)                  |         |\n+-------------------+-----------------------------------+---------+\n| "\\b"              | ASCII Backspace (BS)              |         |\n+-------------------+-----------------------------------+---------+\n| "\\f"              | ASCII Formfeed (FF)               |         |\n+-------------------+-----------------------------------+---------+\n| "\\n"              | ASCII Linefeed (LF)               |         |\n+-------------------+-----------------------------------+---------+\n| "\\N{name}"        | Character named *name* in the     |         |\n|                   | Unicode database (Unicode only)   |         |\n+-------------------+-----------------------------------+---------+\n| "\\r"              | ASCII Carriage Return (CR)        |         |\n+-------------------+-----------------------------------+---------+\n| "\\t"              | ASCII Horizontal Tab (TAB)        |         |\n+-------------------+-----------------------------------+---------+\n| "\\uxxxx"          | Character with 16-bit hex value   | (1)     |\n|                   | *xxxx* (Unicode only)             |         |\n+-------------------+-----------------------------------+---------+\n| "\\Uxxxxxxxx"      | Character with 32-bit hex value   | (2)     |\n|                   | *xxxxxxxx* (Unicode only)         |         |\n+-------------------+-----------------------------------+---------+\n| "\\v"              | ASCII Vertical Tab (VT)           |         |\n+-------------------+-----------------------------------+---------+\n| "\\ooo"            | Character with octal value *ooo*  | (3,5)   |\n+-------------------+-----------------------------------+---------+\n| "\\xhh"            | Character with hex value *hh*     | (4,5)   |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. Individual code units which form parts of a surrogate pair can\n   be encoded using this escape sequence.\n\n2. Any Unicode character can be encoded this way, but characters\n   outside the Basic Multilingual Plane (BMP) will be encoded using a\n   surrogate pair if Python is compiled to use 16-bit code units (the\n   default).\n\n3. As in Standard C, up to three octal digits are accepted.\n\n4. Unlike in Standard C, exactly two hex digits are required.\n\n5. In a string literal, hexadecimal and octal escapes denote the\n   byte with the given value; it is not necessary that the byte\n   encodes a character in the source character set. 
In a Unicode\n   literal, these escapes denote a Unicode character with the given\n   value.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*.  (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.)  It is also\nimportant to note that the escape sequences marked as "(Unicode only)"\nin the table above fall into the category of unrecognized escapes for\nnon-Unicode string literals.\n\nWhen an "\'r\'" or "\'R\'" prefix is present, a character following a\nbackslash is included in the string without change, and *all\nbackslashes are left in the string*.  For example, the string literal\n"r"\\n"" consists of two characters: a backslash and a lowercase "\'n\'".\nString quotes can be escaped with a backslash, but the backslash\nremains in the string; for example, "r"\\""" is a valid string literal\nconsisting of two characters: a backslash and a double quote; "r"\\""\nis not a valid string literal (even a raw string cannot end in an odd\nnumber of backslashes).  Specifically, *a raw string cannot end in a\nsingle backslash* (since the backslash would escape the following\nquote character).  Note also that a single backslash followed by a\nnewline is interpreted as those two characters as part of the string,\n*not* as a line continuation.\n\nWhen an "\'r\'" or "\'R\'" prefix is used in conjunction with a "\'u\'" or\n"\'U\'" prefix, then the "\\uXXXX" and "\\UXXXXXXXX" escape sequences are\nprocessed while  *all other backslashes are left in the string*. For\nexample, the string literal "ur"\\u0062\\n"" consists of three Unicode\ncharacters: \'LATIN SMALL LETTER B\', \'REVERSE SOLIDUS\', and \'LATIN\nSMALL LETTER N\'. Backslashes can be escaped with a preceding\nbackslash; however, both remain in the string.  As a result, "\\uXXXX"\nescape sequences are only recognized when there are an odd number of\nbackslashes.\n',
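The escape-sequence and raw-string rules above can be summarised with a few interactive examples (the literals chosen here are arbitrary):

   >>> len('\n'), len(r'\n')     # escape sequence vs. raw string
   (1, 2)
   >>> '\x41\101'                # hex and octal escapes both denote 'A'
   'AA'
   >>> '\q'                      # unrecognized escape: the backslash is kept
   '\\q'
   >>> u'\u0062'                 # 16-bit hex escape, Unicode literals only
   u'b'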
  'subscriptions': u'\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n   subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object of a sequence or mapping type.\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey.  (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to a\nplain integer.  If this value is negative, the length of the sequence\nis added to it (so that, e.g., "x[-1]" selects the last item of "x".)\nThe resulting value must be a nonnegative integer less than the number\nof items in the sequence, and the subscription selects the item whose\nindex is that value (counting from zero).\n\nA string\'s items are characters.  A character is not a separate data\ntype but a string of exactly one character.\n',
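For example (using the throwaway names "a" and "d"):

   >>> a = ['x', 'y', 'z']
   >>> a[0], a[-1]               # a negative index counts from the end
   ('x', 'z')
   >>> d = {('r', 2): 'd2'}
   >>> d['r', 2]                 # the expression list forms a tuple key
   'd2'
   >>> 'python'[1]               # a string item is itself a one-character string
   'y'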
  'truth': u'\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an "if" or\n"while" condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* "None"\n\n* "False"\n\n* zero of any numeric type, for example, "0", "0L", "0.0", "0j".\n\n* any empty sequence, for example, "\'\'", "()", "[]".\n\n* any empty mapping, for example, "{}".\n\n* instances of user-defined classes, if the class defines a\n  "__nonzero__()" or "__len__()" method, when that method returns the\n  integer zero or "bool" value "False". [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn "0" or "False" for false and "1" or "True" for true, unless\notherwise stated. (Important exception: the Boolean operations "or"\nand "and" always return one of their operands.)\n',
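A brief sketch of these rules (the Box class is only an illustration of the "__len__()" hook):

   >>> bool(0), bool(''), bool({}), bool(None)
   (False, False, False, False)
   >>> class Box(object):
   ...     def __init__(self, items):
   ...         self.items = items
   ...     def __len__(self):    # consulted for truth testing when __nonzero__() is absent
   ...         return len(self.items)
   ...
   >>> bool(Box([])), bool(Box([1]))
   (False, True)
   >>> 0 or 'default'            # "or" returns one of its operands, not a bool
   'default'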
  'try': u'\nThe "try" statement\n*******************\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n   try_stmt  ::= try1_stmt | try2_stmt\n   try1_stmt ::= "try" ":" suite\n                 ("except" [expression [("as" | ",") identifier]] ":" suite)+\n                 ["else" ":" suite]\n                 ["finally" ":" suite]\n   try2_stmt ::= "try" ":" suite\n                 "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n"try"..."except"..."finally" did not work. "try"..."except" had to be\nnested in "try"..."finally".\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started.  This search inspects the except clauses\nin turn until one is found that matches the exception.  An expression-\nless except clause, if present, must be last; it matches any\nexception.  For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception.  An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject, or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed.  All except clauses must have an\nexecutable block.  When the end of this block is reached, execution\ncontinues normally after the entire try statement.  (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the "sys" module:\n"sys.exc_type" receives the object identifying the exception;\n"sys.exc_value" receives the exception\'s parameter;\n"sys.exc_traceback" receives a traceback object (see section The\nstandard type hierarchy) identifying the point in the program where\nthe exception occurred. These details are also available through the\n"sys.exc_info()" function, which returns a tuple "(exc_type,\nexc_value, exc_traceback)".  Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program.  As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler.  The "try"\nclause is executed, including any "except" and "else" clauses.  
If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed.  If\nthere is a saved exception, it is re-raised at the end of the\n"finally" clause. If the "finally" clause raises another exception or\nexecutes a "return" or "break" statement, the saved exception is\ndiscarded:\n\n   >>> def f():\n   ...     try:\n   ...         1/0\n   ...     finally:\n   ...         return 42\n   ...\n   >>> f()\n   42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed.  Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n   >>> def foo():\n   ...     try:\n   ...         return \'try\'\n   ...     finally:\n   ...         return \'finally\'\n   ...\n   >>> foo()\n   \'finally\'\n\nAdditional information on exceptions can be found in section\nExceptions, and information on using the "raise" statement to generate\nexceptions may be found in section The raise statement.\n',
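The clause ordering described above can be seen in a small example ("parse()" is just an illustrative helper):

   >>> def parse(text):
   ...     try:
   ...         value = int(text)
   ...     except ValueError as exc:     # "as" form of the except target
   ...         print 'bad input: %s' % exc
   ...     else:                         # runs only if no exception occurred
   ...         return value
   ...     finally:
   ...         print 'cleanup'           # always runs, even around "return"
   ...
   >>> parse('7')
   cleanup
   7
   >>> parse('x')
   bad input: invalid literal for int() with base 10: 'x'
   cleanup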
  'types': u'\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python.  Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types.  Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.).\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\'  These are attributes that provide access to the\nimplementation and are not intended for general use.  Their definition\nmay change in the future.\n\nNone\n   This type has a single value.  There is a single object with this\n   value. This object is accessed through the built-in name "None". It\n   is used to signify the absence of a value in many situations, e.g.,\n   it is returned from functions that don\'t explicitly return\n   anything. Its truth value is false.\n\nNotImplemented\n   This type has a single value.  There is a single object with this\n   value. This object is accessed through the built-in name\n   "NotImplemented". Numeric methods and rich comparison methods may\n   return this value if they do not implement the operation for the\n   operands provided.  (The interpreter will then try the reflected\n   operation, or some other fallback, depending on the operator.)  Its\n   truth value is true.\n\nEllipsis\n   This type has a single value.  There is a single object with this\n   value. This object is accessed through the built-in name\n   "Ellipsis". It is used to indicate the presence of the "..." syntax\n   in a slice.  Its truth value is true.\n\n"numbers.Number"\n   These are created by numeric literals and returned as results by\n   arithmetic operators and arithmetic built-in functions.  Numeric\n   objects are immutable; once created their value never changes.\n   Python numbers are of course strongly related to mathematical\n   numbers, but subject to the limitations of numerical representation\n   in computers.\n\n   Python distinguishes between integers, floating point numbers, and\n   complex numbers:\n\n   "numbers.Integral"\n      These represent elements from the mathematical set of integers\n      (positive and negative).\n\n      There are three types of integers:\n\n      Plain integers\n         These represent numbers in the range -2147483648 through\n         2147483647. (The range may be larger on machines with a\n         larger natural word size, but not smaller.)  When the result\n         of an operation would fall outside this range, the result is\n         normally returned as a long integer (in some cases, the\n         exception "OverflowError" is raised instead).  For the\n         purpose of shift and mask operations, integers are assumed to\n         have a binary, 2\'s complement notation using 32 or more bits,\n         and hiding no bits from the user (i.e., all 4294967296\n         different bit patterns correspond to different values).\n\n      Long integers\n         These represent numbers in an unlimited range, subject to\n         available (virtual) memory only.  For the purpose of shift\n         and mask operations, a binary representation is assumed, and\n         negative numbers are represented in a variant of 2\'s\n         complement which gives the illusion of an infinite string of\n         sign bits extending to the left.\n\n      Booleans\n         These represent the truth values False and True.  
The two\n         objects representing the values "False" and "True" are the\n         only Boolean objects. The Boolean type is a subtype of plain\n         integers, and Boolean values behave like the values 0 and 1,\n         respectively, in almost all contexts, the exception being\n         that when converted to a string, the strings ""False"" or\n         ""True"" are returned, respectively.\n\n      The rules for integer representation are intended to give the\n      most meaningful interpretation of shift and mask operations\n      involving negative integers and the least surprises when\n      switching between the plain and long integer domains.  Any\n      operation, if it yields a result in the plain integer domain,\n      will yield the same result in the long integer domain or when\n      using mixed operands.  The switch between domains is transparent\n      to the programmer.\n\n   "numbers.Real" ("float")\n      These represent machine-level double precision floating point\n      numbers. You are at the mercy of the underlying machine\n      architecture (and C or Java implementation) for the accepted\n      range and handling of overflow. Python does not support single-\n      precision floating point numbers; the savings in processor and\n      memory usage that are usually the reason for using these are\n      dwarfed by the overhead of using objects in Python, so there is\n      no reason to complicate the language with two kinds of floating\n      point numbers.\n\n   "numbers.Complex"\n      These represent complex numbers as a pair of machine-level\n      double precision floating point numbers.  The same caveats apply\n      as for floating point numbers. The real and imaginary parts of a\n      complex number "z" can be retrieved through the read-only\n      attributes "z.real" and "z.imag".\n\nSequences\n   These represent finite ordered sets indexed by non-negative\n   numbers. The built-in function "len()" returns the number of items\n   of a sequence. When the length of a sequence is *n*, the index set\n   contains the numbers 0, 1, ..., *n*-1.  Item *i* of sequence *a* is\n   selected by "a[i]".\n\n   Sequences also support slicing: "a[i:j]" selects all items with\n   index *k* such that *i* "<=" *k* "<" *j*.  When used as an\n   expression, a slice is a sequence of the same type.  This implies\n   that the index set is renumbered so that it starts at 0.\n\n   Some sequences also support "extended slicing" with a third "step"\n   parameter: "a[i:j:k]" selects all items of *a* with index *x* where\n   "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n\n   Sequences are distinguished according to their mutability:\n\n   Immutable sequences\n      An object of an immutable sequence type cannot change once it is\n      created.  (If the object contains references to other objects,\n      these other objects may be mutable and may be changed; however,\n      the collection of objects directly referenced by an immutable\n      object cannot change.)\n\n      The following types are immutable sequences:\n\n      Strings\n         The items of a string are characters.  There is no separate\n         character type; a character is represented by a string of one\n         item. Characters represent (at least) 8-bit bytes.  The\n         built-in functions "chr()" and "ord()" convert between\n         characters and nonnegative integers representing the byte\n         values.  
Bytes with the values 0-127 usually represent the\n         corresponding ASCII values, but the interpretation of values\n         is up to the program.  The string data type is also used to\n         represent arrays of bytes, e.g., to hold data read from a\n         file.\n\n         (On systems whose native character set is not ASCII, strings\n         may use EBCDIC in their internal representation, provided the\n         functions "chr()" and "ord()" implement a mapping between\n         ASCII and EBCDIC, and string comparison preserves the ASCII\n         order. Or perhaps someone can propose a better rule?)\n\n      Unicode\n         The items of a Unicode object are Unicode code units.  A\n         Unicode code unit is represented by a Unicode object of one\n         item and can hold either a 16-bit or 32-bit value\n         representing a Unicode ordinal (the maximum value for the\n         ordinal is given in "sys.maxunicode", and depends on how\n         Python is configured at compile time).  Surrogate pairs may\n         be present in the Unicode object, and will be reported as two\n         separate items.  The built-in functions "unichr()" and\n         "ord()" convert between code units and nonnegative integers\n         representing the Unicode ordinals as defined in the Unicode\n         Standard 3.0. Conversion from and to other encodings are\n         possible through the Unicode method "encode()" and the built-\n         in function "unicode()".\n\n      Tuples\n         The items of a tuple are arbitrary Python objects. Tuples of\n         two or more items are formed by comma-separated lists of\n         expressions.  A tuple of one item (a \'singleton\') can be\n         formed by affixing a comma to an expression (an expression by\n         itself does not create a tuple, since parentheses must be\n         usable for grouping of expressions).  An empty tuple can be\n         formed by an empty pair of parentheses.\n\n   Mutable sequences\n      Mutable sequences can be changed after they are created.  The\n      subscription and slicing notations can be used as the target of\n      assignment and "del" (delete) statements.\n\n      There are currently two intrinsic mutable sequence types:\n\n      Lists\n         The items of a list are arbitrary Python objects.  Lists are\n         formed by placing a comma-separated list of expressions in\n         square brackets. (Note that there are no special cases needed\n         to form lists of length 0 or 1.)\n\n      Byte Arrays\n         A bytearray object is a mutable array. They are created by\n         the built-in "bytearray()" constructor.  Aside from being\n         mutable (and hence unhashable), byte arrays otherwise provide\n         the same interface and functionality as immutable bytes\n         objects.\n\n      The extension module "array" provides an additional example of a\n      mutable sequence type.\n\nSet types\n   These represent unordered, finite sets of unique, immutable\n   objects. As such, they cannot be indexed by any subscript. However,\n   they can be iterated over, and the built-in function "len()"\n   returns the number of items in a set. Common uses for sets are fast\n   membership testing, removing duplicates from a sequence, and\n   computing mathematical operations such as intersection, union,\n   difference, and symmetric difference.\n\n   For set elements, the same immutability rules apply as for\n   dictionary keys. 
Note that numeric types obey the normal rules for\n   numeric comparison: if two numbers compare equal (e.g., "1" and\n   "1.0"), only one of them can be contained in a set.\n\n   There are currently two intrinsic set types:\n\n   Sets\n      These represent a mutable set. They are created by the built-in\n      "set()" constructor and can be modified afterwards by several\n      methods, such as "add()".\n\n   Frozen sets\n      These represent an immutable set.  They are created by the\n      built-in "frozenset()" constructor.  As a frozenset is immutable\n      and *hashable*, it can be used again as an element of another\n      set, or as a dictionary key.\n\nMappings\n   These represent finite sets of objects indexed by arbitrary index\n   sets. The subscript notation "a[k]" selects the item indexed by "k"\n   from the mapping "a"; this can be used in expressions and as the\n   target of assignments or "del" statements. The built-in function\n   "len()" returns the number of items in a mapping.\n\n   There is currently a single intrinsic mapping type:\n\n   Dictionaries\n      These represent finite sets of objects indexed by nearly\n      arbitrary values.  The only types of values not acceptable as\n      keys are values containing lists or dictionaries or other\n      mutable types that are compared by value rather than by object\n      identity, the reason being that the efficient implementation of\n      dictionaries requires a key\'s hash value to remain constant.\n      Numeric types used for keys obey the normal rules for numeric\n      comparison: if two numbers compare equal (e.g., "1" and "1.0")\n      then they can be used interchangeably to index the same\n      dictionary entry.\n\n      Dictionaries are mutable; they can be created by the "{...}"\n      notation (see section Dictionary displays).\n\n      The extension modules "dbm", "gdbm", and "bsddb" provide\n      additional examples of mapping types.\n\nCallable types\n   These are the types to which the function call operation (see\n   section Calls) can be applied:\n\n   User-defined functions\n      A user-defined function object is created by a function\n      definition (see section Function definitions).  It should be\n      called with an argument list containing the same number of items\n      as the function\'s formal parameter list.\n\n      Special attributes:\n\n      +-------------------------+---------------------------------+-------------+\n      | Attribute               | Meaning                         |             |\n      +=========================+=================================+=============+\n      | "__doc__" "func_doc"    | The function\'s documentation    | Writable    |\n      |                         | string, or "None" if            |             |\n      |                         | unavailable.                    |             |\n      +-------------------------+---------------------------------+-------------+\n      | "__name__" "func_name"  | The function\'s name.            | Writable    |\n      +-------------------------+---------------------------------+-------------+\n      | "__module__"            | The name of the module the      | Writable    |\n      |                         | function was defined in, or     |             |\n      |                         | "None" if unavailable.          
|             |\n      +-------------------------+---------------------------------+-------------+\n      | "__defaults__"          | A tuple containing default      | Writable    |\n      | "func_defaults"         | argument values for those       |             |\n      |                         | arguments that have defaults,   |             |\n      |                         | or "None" if no arguments have  |             |\n      |                         | a default value.                |             |\n      +-------------------------+---------------------------------+-------------+\n      | "__code__" "func_code"  | The code object representing    | Writable    |\n      |                         | the compiled function body.     |             |\n      +-------------------------+---------------------------------+-------------+\n      | "__globals__"           | A reference to the dictionary   | Read-only   |\n      | "func_globals"          | that holds the function\'s       |             |\n      |                         | global variables --- the global |             |\n      |                         | namespace of the module in      |             |\n      |                         | which the function was defined. |             |\n      +-------------------------+---------------------------------+-------------+\n      | "__dict__" "func_dict"  | The namespace supporting        | Writable    |\n      |                         | arbitrary function attributes.  |             |\n      +-------------------------+---------------------------------+-------------+\n      | "__closure__"           | "None" or a tuple of cells that | Read-only   |\n      | "func_closure"          | contain bindings for the        |             |\n      |                         | function\'s free variables.      |             |\n      +-------------------------+---------------------------------+-------------+\n\n      Most of the attributes labelled "Writable" check the type of the\n      assigned value.\n\n      Changed in version 2.4: "func_name" is now writable.\n\n      Changed in version 2.6: The double-underscore attributes\n      "__closure__", "__code__", "__defaults__", and "__globals__"\n      were introduced as aliases for the corresponding "func_*"\n      attributes for forwards compatibility with Python 3.\n\n      Function objects also support getting and setting arbitrary\n      attributes, which can be used, for example, to attach metadata\n      to functions.  Regular attribute dot-notation is used to get and\n      set such attributes. *Note that the current implementation only\n      supports function attributes on user-defined functions. 
Function\n      attributes on built-in functions may be supported in the\n      future.*\n\n      Additional information about a function\'s definition can be\n      retrieved from its code object; see the description of internal\n      types below.\n\n   User-defined methods\n      A user-defined method object combines a class, a class instance\n      (or "None") and any callable object (normally a user-defined\n      function).\n\n      Special read-only attributes: "im_self" is the class instance\n      object, "im_func" is the function object; "im_class" is the\n      class of "im_self" for bound methods or the class that asked for\n      the method for unbound methods; "__doc__" is the method\'s\n      documentation (same as "im_func.__doc__"); "__name__" is the\n      method name (same as "im_func.__name__"); "__module__" is the\n      name of the module the method was defined in, or "None" if\n      unavailable.\n\n      Changed in version 2.2: "im_self" used to refer to the class\n      that defined the method.\n\n      Changed in version 2.6: For Python 3 forward-compatibility,\n      "im_func" is also available as "__func__", and "im_self" as\n      "__self__".\n\n      Methods also support accessing (but not setting) the arbitrary\n      function attributes on the underlying function object.\n\n      User-defined method objects may be created when getting an\n      attribute of a class (perhaps via an instance of that class), if\n      that attribute is a user-defined function object, an unbound\n      user-defined method object, or a class method object. When the\n      attribute is a user-defined method object, a new method object\n      is only created if the class from which it is being retrieved is\n      the same as, or a derived class of, the class stored in the\n      original method object; otherwise, the original method object is\n      used as it is.\n\n      When a user-defined method object is created by retrieving a\n      user-defined function object from a class, its "im_self"\n      attribute is "None" and the method object is said to be unbound.\n      When one is created by retrieving a user-defined function object\n      from a class via one of its instances, its "im_self" attribute\n      is the instance, and the method object is said to be bound. In\n      either case, the new method\'s "im_class" attribute is the class\n      from which the retrieval takes place, and its "im_func"\n      attribute is the original function object.\n\n      When a user-defined method object is created by retrieving\n      another method object from a class or instance, the behaviour is\n      the same as for a function object, except that the "im_func"\n      attribute of the new instance is not the original method object\n      but its "im_func" attribute.\n\n      When a user-defined method object is created by retrieving a\n      class method object from a class or instance, its "im_self"\n      attribute is the class itself, and its "im_func" attribute is\n      the function object underlying the class method.\n\n      When an unbound user-defined method object is called, the\n      underlying function ("im_func") is called, with the restriction\n      that the first argument must be an instance of the proper class\n      ("im_class") or of a derived class thereof.\n\n      When a bound user-defined method object is called, the\n      underlying function ("im_func") is called, inserting the class\n      instance ("im_self") in front of the argument list.  
For\n      instance, when "C" is a class which contains a definition for a\n      function "f()", and "x" is an instance of "C", calling "x.f(1)"\n      is equivalent to calling "C.f(x, 1)".\n\n      When a user-defined method object is derived from a class method\n      object, the "class instance" stored in "im_self" will actually\n      be the class itself, so that calling either "x.f(1)" or "C.f(1)"\n      is equivalent to calling "f(C,1)" where "f" is the underlying\n      function.\n\n      Note that the transformation from function object to (unbound or\n      bound) method object happens each time the attribute is\n      retrieved from the class or instance. In some cases, a fruitful\n      optimization is to assign the attribute to a local variable and\n      call that local variable. Also notice that this transformation\n      only happens for user-defined functions; other callable objects\n      (and all non-callable objects) are retrieved without\n      transformation.  It is also important to note that user-defined\n      functions which are attributes of a class instance are not\n      converted to bound methods; this *only* happens when the\n      function is an attribute of the class.\n\n   Generator functions\n      A function or method which uses the "yield" statement (see\n      section The yield statement) is called a *generator function*.\n      Such a function, when called, always returns an iterator object\n      which can be used to execute the body of the function:  calling\n      the iterator\'s "next()" method will cause the function to\n      execute until it provides a value using the "yield" statement.\n      When the function executes a "return" statement or falls off the\n      end, a "StopIteration" exception is raised and the iterator will\n      have reached the end of the set of values to be returned.\n\n   Built-in functions\n      A built-in function object is a wrapper around a C function.\n      Examples of built-in functions are "len()" and "math.sin()"\n      ("math" is a standard built-in module). The number and type of\n      the arguments are determined by the C function. Special read-\n      only attributes: "__doc__" is the function\'s documentation\n      string, or "None" if unavailable; "__name__" is the function\'s\n      name; "__self__" is set to "None" (but see the next item);\n      "__module__" is the name of the module the function was defined\n      in or "None" if unavailable.\n\n   Built-in methods\n      This is really a different disguise of a built-in function, this\n      time containing an object passed to the C function as an\n      implicit extra argument.  An example of a built-in method is\n      "alist.append()", assuming *alist* is a list object. In this\n      case, the special read-only attribute "__self__" is set to the\n      object denoted by *alist*.\n\n   Class Types\n      Class types, or "new-style classes," are callable.  These\n      objects normally act as factories for new instances of\n      themselves, but variations are possible for class types that\n      override "__new__()".  The arguments of the call are passed to\n      "__new__()" and, in the typical case, to "__init__()" to\n      initialize the new instance.\n\n   Classic Classes\n      Class objects are described below.  When a class object is\n      called, a new class instance (also described below) is created\n      and returned.  This implies a call to the class\'s "__init__()"\n      method if it has one.  
Any arguments are passed on to the\n      "__init__()" method.  If there is no "__init__()" method, the\n      class must be called without arguments.\n\n   Class instances\n      Class instances are described below.  Class instances are\n      callable only when the class has a "__call__()" method;\n      "x(arguments)" is a shorthand for "x.__call__(arguments)".\n\nModules\n   Modules are imported by the "import" statement (see section The\n   import statement). A module object has a namespace implemented by a\n   dictionary object (this is the dictionary referenced by the\n   func_globals attribute of functions defined in the module).\n   Attribute references are translated to lookups in this dictionary,\n   e.g., "m.x" is equivalent to "m.__dict__["x"]". A module object\n   does not contain the code object used to initialize the module\n   (since it isn\'t needed once the initialization is done).\n\n   Attribute assignment updates the module\'s namespace dictionary,\n   e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n\n   Special read-only attribute: "__dict__" is the module\'s namespace\n   as a dictionary object.\n\n   **CPython implementation detail:** Because of the way CPython\n   clears module dictionaries, the module dictionary will be cleared\n   when the module falls out of scope even if the dictionary still has\n   live references.  To avoid this, copy the dictionary or keep the\n   module around while using its dictionary directly.\n\n   Predefined (writable) attributes: "__name__" is the module\'s name;\n   "__doc__" is the module\'s documentation string, or "None" if\n   unavailable; "__file__" is the pathname of the file from which the\n   module was loaded, if it was loaded from a file. The "__file__"\n   attribute is not present for C modules that are statically linked\n   into the interpreter; for extension modules loaded dynamically from\n   a shared library, it is the pathname of the shared library file.\n\nClasses\n   Both class types (new-style classes) and class objects (old-\n   style/classic classes) are typically created by class definitions\n   (see section Class definitions).  A class has a namespace\n   implemented by a dictionary object. Class attribute references are\n   translated to lookups in this dictionary, e.g., "C.x" is translated\n   to "C.__dict__["x"]" (although for new-style classes in particular\n   there are a number of hooks which allow for other means of locating\n   attributes). When the attribute name is not found there, the\n   attribute search continues in the base classes.  For old-style\n   classes, the search is depth-first, left-to-right in the order of\n   occurrence in the base class list. New-style classes use the more\n   complex C3 method resolution order which behaves correctly even in\n   the presence of \'diamond\' inheritance structures where there are\n   multiple inheritance paths leading back to a common ancestor.\n   Additional details on the C3 MRO used by new-style classes can be\n   found in the documentation accompanying the 2.3 release at\n   https://www.python.org/download/releases/2.3/mro/.\n\n   When a class attribute reference (for class "C", say) would yield a\n   user-defined function object or an unbound user-defined method\n   object whose associated class is either "C" or one of its base\n   classes, it is transformed into an unbound user-defined method\n   object whose "im_class" attribute is "C". 
When it would yield a\n   class method object, it is transformed into a bound user-defined\n   method object whose "im_self" attribute is "C".  When it would\n   yield a static method object, it is transformed into the object\n   wrapped by the static method object. See section Implementing\n   Descriptors for another way in which attributes retrieved from a\n   class may differ from those actually contained in its "__dict__"\n   (note that only new-style classes support descriptors).\n\n   Class attribute assignments update the class\'s dictionary, never\n   the dictionary of a base class.\n\n   A class object can be called (see above) to yield a class instance\n   (see below).\n\n   Special attributes: "__name__" is the class name; "__module__" is\n   the module name in which the class was defined; "__dict__" is the\n   dictionary containing the class\'s namespace; "__bases__" is a tuple\n   (possibly empty or a singleton) containing the base classes, in the\n   order of their occurrence in the base class list; "__doc__" is the\n   class\'s documentation string, or None if undefined.\n\nClass instances\n   A class instance is created by calling a class object (see above).\n   A class instance has a namespace implemented as a dictionary which\n   is the first place in which attribute references are searched.\n   When an attribute is not found there, and the instance\'s class has\n   an attribute by that name, the search continues with the class\n   attributes.  If a class attribute is found that is a user-defined\n   function object or an unbound user-defined method object whose\n   associated class is the class (call it "C") of the instance for\n   which the attribute reference was initiated or one of its bases, it\n   is transformed into a bound user-defined method object whose\n   "im_class" attribute is "C" and whose "im_self" attribute is the\n   instance. Static method and class method objects are also\n   transformed, as if they had been retrieved from class "C"; see\n   above under "Classes". See section Implementing Descriptors for\n   another way in which attributes of a class retrieved via its\n   instances may differ from the objects actually stored in the\n   class\'s "__dict__". If no class attribute is found, and the\n   object\'s class has a "__getattr__()" method, that is called to\n   satisfy the lookup.\n\n   Attribute assignments and deletions update the instance\'s\n   dictionary, never a class\'s dictionary.  If the class has a\n   "__setattr__()" or "__delattr__()" method, this is called instead\n   of updating the instance dictionary directly.\n\n   Class instances can pretend to be numbers, sequences, or mappings\n   if they have methods with certain special names.  See section\n   Special method names.\n\n   Special attributes: "__dict__" is the attribute dictionary;\n   "__class__" is the instance\'s class.\n\nFiles\n   A file object represents an open file.  File objects are created by\n   the "open()" built-in function, and also by "os.popen()",\n   "os.fdopen()", and the "makefile()" method of socket objects (and\n   perhaps by other functions or methods provided by extension\n   modules).  The objects "sys.stdin", "sys.stdout" and "sys.stderr"\n   are initialized to file objects corresponding to the interpreter\'s\n   standard input, output and error streams.  See File Objects for\n   complete documentation of file objects.\n\nInternal types\n   A few types used internally by the interpreter are exposed to the\n   user. 
Their definitions may change with future versions of the\n   interpreter, but they are mentioned here for completeness.\n\n   Code objects\n      Code objects represent *byte-compiled* executable Python code,\n      or *bytecode*. The difference between a code object and a\n      function object is that the function object contains an explicit\n      reference to the function\'s globals (the module in which it was\n      defined), while a code object contains no context; also the\n      default argument values are stored in the function object, not\n      in the code object (because they represent values calculated at\n      run-time).  Unlike function objects, code objects are immutable\n      and contain no references (directly or indirectly) to mutable\n      objects.\n\n      Special read-only attributes: "co_name" gives the function name;\n      "co_argcount" is the number of positional arguments (including\n      arguments with default values); "co_nlocals" is the number of\n      local variables used by the function (including arguments);\n      "co_varnames" is a tuple containing the names of the local\n      variables (starting with the argument names); "co_cellvars" is a\n      tuple containing the names of local variables that are\n      referenced by nested functions; "co_freevars" is a tuple\n      containing the names of free variables; "co_code" is a string\n      representing the sequence of bytecode instructions; "co_consts"\n      is a tuple containing the literals used by the bytecode;\n      "co_names" is a tuple containing the names used by the bytecode;\n      "co_filename" is the filename from which the code was compiled;\n      "co_firstlineno" is the first line number of the function;\n      "co_lnotab" is a string encoding the mapping from bytecode\n      offsets to line numbers (for details see the source code of the\n      interpreter); "co_stacksize" is the required stack size\n      (including local variables); "co_flags" is an integer encoding a\n      number of flags for the interpreter.\n\n      The following flag bits are defined for "co_flags": bit "0x04"\n      is set if the function uses the "*arguments" syntax to accept an\n      arbitrary number of positional arguments; bit "0x08" is set if\n      the function uses the "**keywords" syntax to accept arbitrary\n      keyword arguments; bit "0x20" is set if the function is a\n      generator.\n\n      Future feature declarations ("from __future__ import division")\n      also use bits in "co_flags" to indicate whether a code object\n      was compiled with a particular feature enabled: bit "0x2000" is\n      set if the function was compiled with future division enabled;\n      bits "0x10" and "0x1000" were used in earlier versions of\n      Python.\n\n      Other bits in "co_flags" are reserved for internal use.\n\n      If a code object represents a function, the first item in\n      "co_consts" is the documentation string of the function, or\n      "None" if undefined.\n\n   Frame objects\n      Frame objects represent execution frames.  
They may occur in\n      traceback objects (see below).\n\n      Special read-only attributes: "f_back" is to the previous stack\n      frame (towards the caller), or "None" if this is the bottom\n      stack frame; "f_code" is the code object being executed in this\n      frame; "f_locals" is the dictionary used to look up local\n      variables; "f_globals" is used for global variables;\n      "f_builtins" is used for built-in (intrinsic) names;\n      "f_restricted" is a flag indicating whether the function is\n      executing in restricted execution mode; "f_lasti" gives the\n      precise instruction (this is an index into the bytecode string\n      of the code object).\n\n      Special writable attributes: "f_trace", if not "None", is a\n      function called at the start of each source code line (this is\n      used by the debugger); "f_exc_type", "f_exc_value",\n      "f_exc_traceback" represent the last exception raised in the\n      parent frame provided another exception was ever raised in the\n      current frame (in all other cases they are None); "f_lineno" is\n      the current line number of the frame --- writing to this from\n      within a trace function jumps to the given line (only for the\n      bottom-most frame).  A debugger can implement a Jump command\n      (aka Set Next Statement) by writing to f_lineno.\n\n   Traceback objects\n      Traceback objects represent a stack trace of an exception.  A\n      traceback object is created when an exception occurs.  When the\n      search for an exception handler unwinds the execution stack, at\n      each unwound level a traceback object is inserted in front of\n      the current traceback.  When an exception handler is entered,\n      the stack trace is made available to the program. (See section\n      The try statement.) It is accessible as "sys.exc_traceback", and\n      also as the third item of the tuple returned by\n      "sys.exc_info()".  The latter is the preferred interface, since\n      it works correctly when the program is using multiple threads.\n      When the program contains no suitable handler, the stack trace\n      is written (nicely formatted) to the standard error stream; if\n      the interpreter is interactive, it is also made available to the\n      user as "sys.last_traceback".\n\n      Special read-only attributes: "tb_next" is the next level in the\n      stack trace (towards the frame where the exception occurred), or\n      "None" if there is no next level; "tb_frame" points to the\n      execution frame of the current level; "tb_lineno" gives the line\n      number where the exception occurred; "tb_lasti" indicates the\n      precise instruction.  The line number and last instruction in\n      the traceback may differ from the line number of its frame\n      object if the exception occurred in a "try" statement with no\n      matching except clause or with a finally clause.\n\n   Slice objects\n      Slice objects are used to represent slices when *extended slice\n      syntax* is used. This is a slice using two colons, or multiple\n      slices or ellipses separated by commas, e.g., "a[i:j:step]",\n      "a[i:j, k:l]", or "a[..., i:j]".  They are also created by the\n      built-in "slice()" function.\n\n      Special read-only attributes: "start" is the lower bound; "stop"\n      is the upper bound; "step" is the step value; each is "None" if\n      omitted.  
These attributes can have any type.\n\n      Slice objects support one method:\n\n      slice.indices(self, length)\n\n         This method takes a single integer argument *length* and\n         computes information about the extended slice that the slice\n         object would describe if applied to a sequence of *length*\n         items.  It returns a tuple of three integers; respectively\n         these are the *start* and *stop* indices and the *step* or\n         stride length of the slice. Missing or out-of-bounds indices\n         are handled in a manner consistent with regular slices.\n\n         New in version 2.3.\n\n   Static method objects\n      Static method objects provide a way of defeating the\n      transformation of function objects to method objects described\n      above. A static method object is a wrapper around any other\n      object, usually a user-defined method object. When a static\n      method object is retrieved from a class or a class instance, the\n      object actually returned is the wrapped object, which is not\n      subject to any further transformation. Static method objects are\n      not themselves callable, although the objects they wrap usually\n      are. Static method objects are created by the built-in\n      "staticmethod()" constructor.\n\n   Class method objects\n      A class method object, like a static method object, is a wrapper\n      around another object that alters the way in which that object\n      is retrieved from classes and class instances. The behaviour of\n      class method objects upon such retrieval is described above,\n      under "User-defined methods". Class method objects are created\n      by the built-in "classmethod()" constructor.\n',
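The slice-object and method-wrapper behaviour documented in the entry above can be checked directly from the interpreter. The following is a minimal Python 2.7 sketch, not part of the shipped file; the Demo class is invented purely for illustration:

    # slice.indices() computes clipped (start, stop, step) for a given length.
    s = slice(1, None, 2)
    print s.indices(10)               # (1, 10, 2)
    print s.indices(3)                # (1, 3, 2) -- out-of-range stop is clipped

    class Demo(object):
        @staticmethod
        def raw(x):                   # retrieving this returns the plain wrapped function
            return 2 * x

        @classmethod
        def which(cls):               # retrieving this yields a method bound to the class
            return cls.__name__

    print Demo.raw(21)                # 42 -- callable without an instance
    print Demo.which.im_self is Demo  # True -- im_self is the class itself
    print type(Demo.__dict__['raw'])  # <type 'staticmethod'> -- the wrapper object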
  'typesfunctions': u'\nFunctions\n*********\n\nFunction objects are created by function definitions.  The only\noperation on a function object is to call it: "func(argument-list)".\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions.  Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee Function definitions for more information.\n',
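As a small aside on the two flavours of function objects mentioned in the 'typesfunctions' entry, a hedged Python 2.7 sketch (user_defined is an arbitrary name):

    import types

    def user_defined(x):
        return x + 1

    # Both flavours support the same operation -- calling -- but are
    # implemented by distinct object types.
    print isinstance(user_defined, types.FunctionType)   # True
    print isinstance(len, types.BuiltinFunctionType)     # True
    print user_defined(41), len('four')                  # 42 4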
- 'typesmapping': u'\nMapping Types --- "dict"\n************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects.  There is currently only one standard\nmapping type, the *dictionary*.  (For other containers see the built\nin "list", "set", and "tuple" classes, and the "collections" module.)\n\nA dictionary\'s keys are *almost* arbitrary values.  Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys.  Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as "1" and "1.0") then they can be used interchangeably to index\nthe same dictionary entry.  (Note however, that since computers store\nfloating-point numbers as approximations it is usually unwise to use\nthem as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of "key:\nvalue" pairs within braces, for example: "{\'jack\': 4098, \'sjoerd\':\n4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the "dict"\nconstructor.\n\nclass class dict(**kwarg)\nclass class dict(mapping, **kwarg)\nclass class dict(iterable, **kwarg)\n\n   Return a new dictionary initialized from an optional positional\n   argument and a possibly empty set of keyword arguments.\n\n   If no positional argument is given, an empty dictionary is created.\n   If a positional argument is given and it is a mapping object, a\n   dictionary is created with the same key-value pairs as the mapping\n   object.  Otherwise, the positional argument must be an *iterable*\n   object.  Each item in the iterable must itself be an iterable with\n   exactly two objects.  The first object of each item becomes a key\n   in the new dictionary, and the second object the corresponding\n   value.  If a key occurs more than once, the last value for that key\n   becomes the corresponding value in the new dictionary.\n\n   If keyword arguments are given, the keyword arguments and their\n   values are added to the dictionary created from the positional\n   argument.  If a key being added is already present, the value from\n   the keyword argument replaces the value from the positional\n   argument.\n\n   To illustrate, the following examples all return a dictionary equal\n   to "{"one": 1, "two": 2, "three": 3}":\n\n      >>> a = dict(one=1, two=2, three=3)\n      >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n      >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n      >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n      >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n      >>> a == b == c == d == e\n      True\n\n   Providing keyword arguments as in the first example only works for\n   keys that are valid Python identifiers.  Otherwise, any valid keys\n   can be used.\n\n   New in version 2.2.\n\n   Changed in version 2.3: Support for building a dictionary from\n   keyword arguments added.\n\n   These are the operations that dictionaries support (and therefore,\n   custom mapping types should support too):\n\n   len(d)\n\n      Return the number of items in the dictionary *d*.\n\n   d[key]\n\n      Return the item of *d* with key *key*.  Raises a "KeyError" if\n      *key* is not in the map.\n\n      If a subclass of dict defines a method "__missing__()" and *key*\n      is not present, the "d[key]" operation calls that method with\n      the key *key* as argument.  
The "d[key]" operation then returns\n      or raises whatever is returned or raised by the\n      "__missing__(key)" call. No other operations or methods invoke\n      "__missing__()". If "__missing__()" is not defined, "KeyError"\n      is raised. "__missing__()" must be a method; it cannot be an\n      instance variable:\n\n         >>> class Counter(dict):\n         ...     def __missing__(self, key):\n         ...         return 0\n         >>> c = Counter()\n         >>> c[\'red\']\n         0\n         >>> c[\'red\'] += 1\n         >>> c[\'red\']\n         1\n\n      The example above shows part of the implementation of\n      "collections.Counter".  A different "__missing__" method is used\n      by "collections.defaultdict".\n\n      New in version 2.5: Recognition of __missing__ methods of dict\n      subclasses.\n\n   d[key] = value\n\n      Set "d[key]" to *value*.\n\n   del d[key]\n\n      Remove "d[key]" from *d*.  Raises a "KeyError" if *key* is not\n      in the map.\n\n   key in d\n\n      Return "True" if *d* has a key *key*, else "False".\n\n      New in version 2.2.\n\n   key not in d\n\n      Equivalent to "not key in d".\n\n      New in version 2.2.\n\n   iter(d)\n\n      Return an iterator over the keys of the dictionary.  This is a\n      shortcut for "iterkeys()".\n\n   clear()\n\n      Remove all items from the dictionary.\n\n   copy()\n\n      Return a shallow copy of the dictionary.\n\n   fromkeys(seq[, value])\n\n      Create a new dictionary with keys from *seq* and values set to\n      *value*.\n\n      "fromkeys()" is a class method that returns a new dictionary.\n      *value* defaults to "None".\n\n      New in version 2.3.\n\n   get(key[, default])\n\n      Return the value for *key* if *key* is in the dictionary, else\n      *default*. If *default* is not given, it defaults to "None", so\n      that this method never raises a "KeyError".\n\n   has_key(key)\n\n      Test for the presence of *key* in the dictionary.  "has_key()"\n      is deprecated in favor of "key in d".\n\n   items()\n\n      Return a copy of the dictionary\'s list of "(key, value)" pairs.\n\n      **CPython implementation detail:** Keys and values are listed in\n      an arbitrary order which is non-random, varies across Python\n      implementations, and depends on the dictionary\'s history of\n      insertions and deletions.\n\n      If "items()", "keys()", "values()", "iteritems()", "iterkeys()",\n      and "itervalues()" are called with no intervening modifications\n      to the dictionary, the lists will directly correspond.  This\n      allows the creation of "(value, key)" pairs using "zip()":\n      "pairs = zip(d.values(), d.keys())".  The same relationship\n      holds for the "iterkeys()" and "itervalues()" methods: "pairs =\n      zip(d.itervalues(), d.iterkeys())" provides the same value for\n      "pairs". Another way to create the same list is "pairs = [(v, k)\n      for (k, v) in d.iteritems()]".\n\n   iteritems()\n\n      Return an iterator over the dictionary\'s "(key, value)" pairs.\n      See the note for "dict.items()".\n\n      Using "iteritems()" while adding or deleting entries in the\n      dictionary may raise a "RuntimeError" or fail to iterate over\n      all entries.\n\n      New in version 2.2.\n\n   iterkeys()\n\n      Return an iterator over the dictionary\'s keys.  
See the note for\n      "dict.items()".\n\n      Using "iterkeys()" while adding or deleting entries in the\n      dictionary may raise a "RuntimeError" or fail to iterate over\n      all entries.\n\n      New in version 2.2.\n\n   itervalues()\n\n      Return an iterator over the dictionary\'s values.  See the note\n      for "dict.items()".\n\n      Using "itervalues()" while adding or deleting entries in the\n      dictionary may raise a "RuntimeError" or fail to iterate over\n      all entries.\n\n      New in version 2.2.\n\n   keys()\n\n      Return a copy of the dictionary\'s list of keys.  See the note\n      for "dict.items()".\n\n   pop(key[, default])\n\n      If *key* is in the dictionary, remove it and return its value,\n      else return *default*.  If *default* is not given and *key* is\n      not in the dictionary, a "KeyError" is raised.\n\n      New in version 2.3.\n\n   popitem()\n\n      Remove and return an arbitrary "(key, value)" pair from the\n      dictionary.\n\n      "popitem()" is useful to destructively iterate over a\n      dictionary, as often used in set algorithms.  If the dictionary\n      is empty, calling "popitem()" raises a "KeyError".\n\n   setdefault(key[, default])\n\n      If *key* is in the dictionary, return its value.  If not, insert\n      *key* with a value of *default* and return *default*.  *default*\n      defaults to "None".\n\n   update([other])\n\n      Update the dictionary with the key/value pairs from *other*,\n      overwriting existing keys.  Return "None".\n\n      "update()" accepts either another dictionary object or an\n      iterable of key/value pairs (as tuples or other iterables of\n      length two).  If keyword arguments are specified, the dictionary\n      is then updated with those key/value pairs: "d.update(red=1,\n      blue=2)".\n\n      Changed in version 2.4: Allowed the argument to be an iterable\n      of key/value pairs and allowed keyword arguments.\n\n   values()\n\n      Return a copy of the dictionary\'s list of values.  See the note\n      for "dict.items()".\n\n   viewitems()\n\n      Return a new view of the dictionary\'s items ("(key, value)"\n      pairs).  See below for documentation of view objects.\n\n      New in version 2.7.\n\n   viewkeys()\n\n      Return a new view of the dictionary\'s keys.  See below for\n      documentation of view objects.\n\n      New in version 2.7.\n\n   viewvalues()\n\n      Return a new view of the dictionary\'s values.  See below for\n      documentation of view objects.\n\n      New in version 2.7.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by "dict.viewkeys()", "dict.viewvalues()" and\n"dict.viewitems()" are *view objects*.  They provide a dynamic view on\nthe dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n   Return the number of entries in the dictionary.\n\niter(dictview)\n\n   Return an iterator over the keys, values or items (represented as\n   tuples of "(key, value)") in the dictionary.\n\n   Keys and values are iterated over in an arbitrary order which is\n   non-random, varies across Python implementations, and depends on\n   the dictionary\'s history of insertions and deletions. If keys,\n   values and items views are iterated over with no intervening\n   modifications to the dictionary, the order of items will directly\n   correspond.  
This allows the creation of "(value, key)" pairs using\n   "zip()": "pairs = zip(d.values(), d.keys())".  Another way to\n   create the same list is "pairs = [(v, k) for (k, v) in d.items()]".\n\n   Iterating views while adding or deleting entries in the dictionary\n   may raise a "RuntimeError" or fail to iterate over all entries.\n\nx in dictview\n\n   Return "True" if *x* is in the underlying dictionary\'s keys, values\n   or items (in the latter case, *x* should be a "(key, value)"\n   tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that (key, value) pairs are unique and\nhashable, then the items view is also set-like.  (Values views are not\ntreated as set-like since the entries are generally not unique.)  Then\nthese set operations are available ("other" refers either to another\nview or a set):\n\ndictview & other\n\n   Return the intersection of the dictview and the other object as a\n   new set.\n\ndictview | other\n\n   Return the union of the dictview and the other object as a new set.\n\ndictview - other\n\n   Return the difference between the dictview and the other object\n   (all elements in *dictview* that aren\'t in *other*) as a new set.\n\ndictview ^ other\n\n   Return the symmetric difference (all elements either in *dictview*\n   or *other*, but not in both) of the dictview and the other object\n   as a new set.\n\nAn example of dictionary view usage:\n\n   >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n   >>> keys = dishes.viewkeys()\n   >>> values = dishes.viewvalues()\n\n   >>> # iteration\n   >>> n = 0\n   >>> for val in values:\n   ...     n += val\n   >>> print(n)\n   504\n\n   >>> # keys and values are iterated over in the same order\n   >>> list(keys)\n   [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n   >>> list(values)\n   [2, 1, 1, 500]\n\n   >>> # view objects are dynamic and reflect dict changes\n   >>> del dishes[\'eggs\']\n   >>> del dishes[\'sausage\']\n   >>> list(keys)\n   [\'spam\', \'bacon\']\n\n   >>> # set operations\n   >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n   {\'bacon\'}\n',
+ 'typesmapping': u'\nMapping Types --- "dict"\n************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects.  There is currently only one standard\nmapping type, the *dictionary*.  (For other containers see the built\nin "list", "set", and "tuple" classes, and the "collections" module.)\n\nA dictionary\'s keys are *almost* arbitrary values.  Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys.  Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as "1" and "1.0") then they can be used interchangeably to index\nthe same dictionary entry.  (Note however, that since computers store\nfloating-point numbers as approximations it is usually unwise to use\nthem as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of "key:\nvalue" pairs within braces, for example: "{\'jack\': 4098, \'sjoerd\':\n4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the "dict"\nconstructor.\n\nclass dict(**kwarg)\nclass dict(mapping, **kwarg)\nclass dict(iterable, **kwarg)\n\n   Return a new dictionary initialized from an optional positional\n   argument and a possibly empty set of keyword arguments.\n\n   If no positional argument is given, an empty dictionary is created.\n   If a positional argument is given and it is a mapping object, a\n   dictionary is created with the same key-value pairs as the mapping\n   object.  Otherwise, the positional argument must be an *iterable*\n   object.  Each item in the iterable must itself be an iterable with\n   exactly two objects.  The first object of each item becomes a key\n   in the new dictionary, and the second object the corresponding\n   value.  If a key occurs more than once, the last value for that key\n   becomes the corresponding value in the new dictionary.\n\n   If keyword arguments are given, the keyword arguments and their\n   values are added to the dictionary created from the positional\n   argument.  If a key being added is already present, the value from\n   the keyword argument replaces the value from the positional\n   argument.\n\n   To illustrate, the following examples all return a dictionary equal\n   to "{"one": 1, "two": 2, "three": 3}":\n\n      >>> a = dict(one=1, two=2, three=3)\n      >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n      >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n      >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n      >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n      >>> a == b == c == d == e\n      True\n\n   Providing keyword arguments as in the first example only works for\n   keys that are valid Python identifiers.  Otherwise, any valid keys\n   can be used.\n\n   New in version 2.2.\n\n   Changed in version 2.3: Support for building a dictionary from\n   keyword arguments added.\n\n   These are the operations that dictionaries support (and therefore,\n   custom mapping types should support too):\n\n   len(d)\n\n      Return the number of items in the dictionary *d*.\n\n   d[key]\n\n      Return the item of *d* with key *key*.  Raises a "KeyError" if\n      *key* is not in the map.\n\n      If a subclass of dict defines a method "__missing__()" and *key*\n      is not present, the "d[key]" operation calls that method with\n      the key *key* as argument.  
The "d[key]" operation then returns\n      or raises whatever is returned or raised by the\n      "__missing__(key)" call. No other operations or methods invoke\n      "__missing__()". If "__missing__()" is not defined, "KeyError"\n      is raised. "__missing__()" must be a method; it cannot be an\n      instance variable:\n\n         >>> class Counter(dict):\n         ...     def __missing__(self, key):\n         ...         return 0\n         >>> c = Counter()\n         >>> c[\'red\']\n         0\n         >>> c[\'red\'] += 1\n         >>> c[\'red\']\n         1\n\n      The example above shows part of the implementation of\n      "collections.Counter".  A different "__missing__" method is used\n      by "collections.defaultdict".\n\n      New in version 2.5: Recognition of __missing__ methods of dict\n      subclasses.\n\n   d[key] = value\n\n      Set "d[key]" to *value*.\n\n   del d[key]\n\n      Remove "d[key]" from *d*.  Raises a "KeyError" if *key* is not\n      in the map.\n\n   key in d\n\n      Return "True" if *d* has a key *key*, else "False".\n\n      New in version 2.2.\n\n   key not in d\n\n      Equivalent to "not key in d".\n\n      New in version 2.2.\n\n   iter(d)\n\n      Return an iterator over the keys of the dictionary.  This is a\n      shortcut for "iterkeys()".\n\n   clear()\n\n      Remove all items from the dictionary.\n\n   copy()\n\n      Return a shallow copy of the dictionary.\n\n   fromkeys(seq[, value])\n\n      Create a new dictionary with keys from *seq* and values set to\n      *value*.\n\n      "fromkeys()" is a class method that returns a new dictionary.\n      *value* defaults to "None".\n\n      New in version 2.3.\n\n   get(key[, default])\n\n      Return the value for *key* if *key* is in the dictionary, else\n      *default*. If *default* is not given, it defaults to "None", so\n      that this method never raises a "KeyError".\n\n   has_key(key)\n\n      Test for the presence of *key* in the dictionary.  "has_key()"\n      is deprecated in favor of "key in d".\n\n   items()\n\n      Return a copy of the dictionary\'s list of "(key, value)" pairs.\n\n      **CPython implementation detail:** Keys and values are listed in\n      an arbitrary order which is non-random, varies across Python\n      implementations, and depends on the dictionary\'s history of\n      insertions and deletions.\n\n      If "items()", "keys()", "values()", "iteritems()", "iterkeys()",\n      and "itervalues()" are called with no intervening modifications\n      to the dictionary, the lists will directly correspond.  This\n      allows the creation of "(value, key)" pairs using "zip()":\n      "pairs = zip(d.values(), d.keys())".  The same relationship\n      holds for the "iterkeys()" and "itervalues()" methods: "pairs =\n      zip(d.itervalues(), d.iterkeys())" provides the same value for\n      "pairs". Another way to create the same list is "pairs = [(v, k)\n      for (k, v) in d.iteritems()]".\n\n   iteritems()\n\n      Return an iterator over the dictionary\'s "(key, value)" pairs.\n      See the note for "dict.items()".\n\n      Using "iteritems()" while adding or deleting entries in the\n      dictionary may raise a "RuntimeError" or fail to iterate over\n      all entries.\n\n      New in version 2.2.\n\n   iterkeys()\n\n      Return an iterator over the dictionary\'s keys.  
See the note for\n      "dict.items()".\n\n      Using "iterkeys()" while adding or deleting entries in the\n      dictionary may raise a "RuntimeError" or fail to iterate over\n      all entries.\n\n      New in version 2.2.\n\n   itervalues()\n\n      Return an iterator over the dictionary\'s values.  See the note\n      for "dict.items()".\n\n      Using "itervalues()" while adding or deleting entries in the\n      dictionary may raise a "RuntimeError" or fail to iterate over\n      all entries.\n\n      New in version 2.2.\n\n   keys()\n\n      Return a copy of the dictionary\'s list of keys.  See the note\n      for "dict.items()".\n\n   pop(key[, default])\n\n      If *key* is in the dictionary, remove it and return its value,\n      else return *default*.  If *default* is not given and *key* is\n      not in the dictionary, a "KeyError" is raised.\n\n      New in version 2.3.\n\n   popitem()\n\n      Remove and return an arbitrary "(key, value)" pair from the\n      dictionary.\n\n      "popitem()" is useful to destructively iterate over a\n      dictionary, as often used in set algorithms.  If the dictionary\n      is empty, calling "popitem()" raises a "KeyError".\n\n   setdefault(key[, default])\n\n      If *key* is in the dictionary, return its value.  If not, insert\n      *key* with a value of *default* and return *default*.  *default*\n      defaults to "None".\n\n   update([other])\n\n      Update the dictionary with the key/value pairs from *other*,\n      overwriting existing keys.  Return "None".\n\n      "update()" accepts either another dictionary object or an\n      iterable of key/value pairs (as tuples or other iterables of\n      length two).  If keyword arguments are specified, the dictionary\n      is then updated with those key/value pairs: "d.update(red=1,\n      blue=2)".\n\n      Changed in version 2.4: Allowed the argument to be an iterable\n      of key/value pairs and allowed keyword arguments.\n\n   values()\n\n      Return a copy of the dictionary\'s list of values.  See the note\n      for "dict.items()".\n\n   viewitems()\n\n      Return a new view of the dictionary\'s items ("(key, value)"\n      pairs).  See below for documentation of view objects.\n\n      New in version 2.7.\n\n   viewkeys()\n\n      Return a new view of the dictionary\'s keys.  See below for\n      documentation of view objects.\n\n      New in version 2.7.\n\n   viewvalues()\n\n      Return a new view of the dictionary\'s values.  See below for\n      documentation of view objects.\n\n      New in version 2.7.\n\n   Dictionaries compare equal if and only if they have the same "(key,\n   value)" pairs.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by "dict.viewkeys()", "dict.viewvalues()" and\n"dict.viewitems()" are *view objects*.  They provide a dynamic view on\nthe dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n   Return the number of entries in the dictionary.\n\niter(dictview)\n\n   Return an iterator over the keys, values or items (represented as\n   tuples of "(key, value)") in the dictionary.\n\n   Keys and values are iterated over in an arbitrary order which is\n   non-random, varies across Python implementations, and depends on\n   the dictionary\'s history of insertions and deletions. 
If keys,\n   values and items views are iterated over with no intervening\n   modifications to the dictionary, the order of items will directly\n   correspond.  This allows the creation of "(value, key)" pairs using\n   "zip()": "pairs = zip(d.values(), d.keys())".  Another way to\n   create the same list is "pairs = [(v, k) for (k, v) in d.items()]".\n\n   Iterating views while adding or deleting entries in the dictionary\n   may raise a "RuntimeError" or fail to iterate over all entries.\n\nx in dictview\n\n   Return "True" if *x* is in the underlying dictionary\'s keys, values\n   or items (in the latter case, *x* should be a "(key, value)"\n   tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that (key, value) pairs are unique and\nhashable, then the items view is also set-like.  (Values views are not\ntreated as set-like since the entries are generally not unique.)  Then\nthese set operations are available ("other" refers either to another\nview or a set):\n\ndictview & other\n\n   Return the intersection of the dictview and the other object as a\n   new set.\n\ndictview | other\n\n   Return the union of the dictview and the other object as a new set.\n\ndictview - other\n\n   Return the difference between the dictview and the other object\n   (all elements in *dictview* that aren\'t in *other*) as a new set.\n\ndictview ^ other\n\n   Return the symmetric difference (all elements either in *dictview*\n   or *other*, but not in both) of the dictview and the other object\n   as a new set.\n\nAn example of dictionary view usage:\n\n   >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n   >>> keys = dishes.viewkeys()\n   >>> values = dishes.viewvalues()\n\n   >>> # iteration\n   >>> n = 0\n   >>> for val in values:\n   ...     n += val\n   >>> print(n)\n   504\n\n   >>> # keys and values are iterated over in the same order\n   >>> list(keys)\n   [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n   >>> list(values)\n   [2, 1, 1, 500]\n\n   >>> # view objects are dynamic and reflect dict changes\n   >>> del dishes[\'eggs\']\n   >>> del dishes[\'sausage\']\n   >>> list(keys)\n   [\'spam\', \'bacon\']\n\n   >>> # set operations\n   >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n   {\'bacon\'}\n',
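Several of the dictionary operations listed in the new 'typesmapping' text have no inline example of their own; a short Python 2.7 sketch (variable names chosen arbitrarily) of fromkeys(), setdefault(), pop(), update(), popitem() and the newly documented equality rule:

    d = dict.fromkeys(['a', 'b'], 0)     # {'a': 0, 'b': 0}
    print d.setdefault('c', 3)           # 3 -- inserted because 'c' was missing
    print d.setdefault('c', 99)          # 3 -- already present, default ignored
    print d.pop('a')                     # 0 -- 'a' removed
    print d.pop('a', 'gone')             # 'gone' -- default avoids the KeyError
    d.update({'b': 2}, e=5)              # mapping argument plus keyword arguments
    print sorted(d.items())              # [('b', 2), ('c', 3), ('e', 5)]
    while d:                             # popitem() destructively drains the dict
        d.popitem()
    print d                              # {}
    print {1: 'a'} == {1.0: 'a'}         # True -- same (key, value) pairs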
  'typesmethods': u'\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as "append()" on lists)\nand class instance methods.  Built-in methods are described with the\ntypes that support them.\n\nThe implementation adds two special read-only attributes to class\ninstance methods: "m.im_self" is the object on which the method\noperates, and "m.im_func" is the function implementing the method.\nCalling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to\ncalling "m.im_func(m.im_self, arg-1, arg-2, ..., arg-n)".\n\nClass instance methods are either *bound* or *unbound*, referring to\nwhether the method was accessed through an instance or a class,\nrespectively.  When a method is unbound, its "im_self" attribute will\nbe "None" and if called, an explicit "self" object must be passed as\nthe first argument.  In this case, "self" must be an instance of the\nunbound method\'s class (or a subclass of that class), otherwise a\n"TypeError" is raised.\n\nLike function objects, methods objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object ("meth.im_func"), setting method\nattributes on either bound or unbound methods is disallowed.\nAttempting to set an attribute on a method results in an\n"AttributeError" being raised.  In order to set a method attribute,\nyou need to explicitly set it on the underlying function object:\n\n   >>> class C:\n   ...     def method(self):\n   ...         pass\n   ...\n   >>> c = C()\n   >>> c.method.whoami = \'my name is method\'  # can\'t set on the method\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in <module>\n   AttributeError: \'instancemethod\' object has no attribute \'whoami\'\n   >>> c.method.im_func.whoami = \'my name is method\'\n   >>> c.method.whoami\n   \'my name is method\'\n\nSee The standard type hierarchy for more information.\n',
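The bound/unbound distinction and the im_self/im_func attributes described in 'typesmethods' can be verified with a few lines of Python 2.7 (class C here simply mirrors the entry's own example):

    class C(object):
        def method(self):
            return 'called on %r' % (self,)

    c = C()
    bound, unbound = c.method, C.method
    print bound.im_self is c                 # True -- bound to the instance
    print unbound.im_self is None            # True -- unbound
    print bound.im_func is unbound.im_func   # True -- same underlying function
    print bound() == unbound(c)              # True -- unbound call needs explicit self
    # unbound(42) would raise TypeError: first argument must be a C instance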
  'typesmodules': u'\nModules\n*******\n\nThe only special operation on a module is attribute access: "m.name",\nwhere *m* is a module and *name* accesses a name defined in *m*\'s\nsymbol table. Module attributes can be assigned to.  (Note that the\n"import" statement is not, strictly speaking, an operation on a module\nobject; "import foo" does not require a module object named *foo* to\nexist, rather it requires an (external) *definition* for a module\nnamed *foo* somewhere.)\n\nA special attribute of every module is "__dict__". This is the\ndictionary containing the module\'s symbol table. Modifying this\ndictionary will actually change the module\'s symbol table, but direct\nassignment to the "__dict__" attribute is not possible (you can write\n"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but you can\'t\nwrite "m.__dict__ = {}").  Modifying "__dict__" directly is not\nrecommended.\n\nModules built into the interpreter are written like this: "<module\n\'sys\' (built-in)>".  If loaded from a file, they are written as\n"<module \'os\' from \'/usr/local/lib/pythonX.Y/os.pyc\'>".\n',
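And a matching sketch for the 'typesmodules' entry, showing that attribute access goes through the module's __dict__ while the __dict__ attribute itself cannot be rebound (Python 2.7; the added 'answer' attribute is arbitrary):

    import math

    print math.__dict__['pi'] == math.pi   # True -- "m.x" is a __dict__ lookup
    math.answer = 42                       # module attributes can be assigned to
    print math.__dict__['answer']          # 42
    try:
        math.__dict__ = {}                 # rebinding __dict__ is not possible
    except (TypeError, AttributeError) as e:
        print 'cannot rebind __dict__:', e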
- 'typesseq': u'\nSequence Types --- "str", "unicode", "list", "tuple", "bytearray", "buffer", "xrange"\n*************************************************************************************\n\nThere are seven sequence types: strings, Unicode strings, lists,\ntuples, bytearrays, buffers, and xrange objects.\n\nFor other containers see the built in "dict" and "set" classes, and\nthe "collections" module.\n\nString literals are written in single or double quotes: "\'xyzzy\'",\n""frobozz"".  See String literals for more about string literals.\nUnicode strings are much like strings, but are specified in the syntax\nusing a preceding "\'u\'" character: "u\'abc\'", "u"def"". In addition to\nthe functionality described here, there are also string-specific\nmethods described in the String Methods section. Lists are constructed\nwith square brackets, separating items with commas: "[a, b, c]".\nTuples are constructed by the comma operator (not within square\nbrackets), with or without enclosing parentheses, but an empty tuple\nmust have the enclosing parentheses, such as "a, b, c" or "()".  A\nsingle item tuple must have a trailing comma, such as "(d,)".\n\nBytearray objects are created with the built-in function\n"bytearray()".\n\nBuffer objects are not directly supported by Python syntax, but can be\ncreated by calling the built-in function "buffer()".  They don\'t\nsupport concatenation or repetition.\n\nObjects of type xrange are similar to buffers in that there is no\nspecific syntax to create them, but they are created using the\n"xrange()" function.  They don\'t support slicing, concatenation or\nrepetition, and using "in", "not in", "min()" or "max()" on them is\ninefficient.\n\nMost sequence types support the following operations.  The "in" and\n"not in" operations have the same priorities as the comparison\noperations.  The "+" and "*" operations have the same priority as the\ncorresponding numeric operations. 
[3] Additional methods are provided\nfor Mutable Sequence Types.\n\nThis table lists the sequence operations sorted in ascending priority.\nIn the table, *s* and *t* are sequences of the same type; *n*, *i* and\n*j* are integers:\n\n+--------------------+----------------------------------+------------+\n| Operation          | Result                           | Notes      |\n+====================+==================================+============+\n| "x in s"           | "True" if an item of *s* is      | (1)        |\n|                    | equal to *x*, else "False"       |            |\n+--------------------+----------------------------------+------------+\n| "x not in s"       | "False" if an item of *s* is     | (1)        |\n|                    | equal to *x*, else "True"        |            |\n+--------------------+----------------------------------+------------+\n| "s + t"            | the concatenation of *s* and *t* | (6)        |\n+--------------------+----------------------------------+------------+\n| "s * n, n * s"     | *n* shallow copies of *s*        | (2)        |\n|                    | concatenated                     |            |\n+--------------------+----------------------------------+------------+\n| "s[i]"             | *i*th item of *s*, origin 0      | (3)        |\n+--------------------+----------------------------------+------------+\n| "s[i:j]"           | slice of *s* from *i* to *j*     | (3)(4)     |\n+--------------------+----------------------------------+------------+\n| "s[i:j:k]"         | slice of *s* from *i* to *j*     | (3)(5)     |\n|                    | with step *k*                    |            |\n+--------------------+----------------------------------+------------+\n| "len(s)"           | length of *s*                    |            |\n+--------------------+----------------------------------+------------+\n| "min(s)"           | smallest item of *s*             |            |\n+--------------------+----------------------------------+------------+\n| "max(s)"           | largest item of *s*              |            |\n+--------------------+----------------------------------+------------+\n| "s.index(x)"       | index of the first occurrence of |            |\n|                    | *x* in *s*                       |            |\n+--------------------+----------------------------------+------------+\n| "s.count(x)"       | total number of occurrences of   |            |\n|                    | *x* in *s*                       |            |\n+--------------------+----------------------------------+------------+\n\nSequence types also support comparisons. In particular, tuples and\nlists are compared lexicographically by comparing corresponding\nelements. This means that to compare equal, every element must compare\nequal and the two sequences must be of the same type and have the same\nlength. (For full details see Comparisons in the language reference.)\n\nNotes:\n\n1. When *s* is a string or Unicode string object the "in" and "not\n   in" operations act like a substring test.  In Python versions\n   before 2.3, *x* had to be a string of length 1. In Python 2.3 and\n   beyond, *x* may be a string of any length.\n\n2. Values of *n* less than "0" are treated as "0" (which yields an\n   empty sequence of the same type as *s*).  Note also that the copies\n   are shallow; nested structures are not copied.  
This often haunts\n   new Python programmers; consider:\n\n   >>> lists = [[]] * 3\n   >>> lists\n   [[], [], []]\n   >>> lists[0].append(3)\n   >>> lists\n   [[3], [3], [3]]\n\n   What has happened is that "[[]]" is a one-element list containing\n   an empty list, so all three elements of "[[]] * 3" are (pointers\n   to) this single empty list.  Modifying any of the elements of\n   "lists" modifies this single list. You can create a list of\n   different lists this way:\n\n   >>> lists = [[] for i in range(3)]\n   >>> lists[0].append(3)\n   >>> lists[1].append(5)\n   >>> lists[2].append(7)\n   >>> lists\n   [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of\n   the string: "len(s) + i" or "len(s) + j" is substituted.  But note\n   that "-0" is still "0".\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n   items with index *k* such that "i <= k < j".  If *i* or *j* is\n   greater than "len(s)", use "len(s)".  If *i* is omitted or "None",\n   use "0".  If *j* is omitted or "None", use "len(s)".  If *i* is\n   greater than or equal to *j*, the slice is empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n   sequence of items with index  "x = i + n*k" such that "0 <= n <\n   (j-i)/k".  In other words, the indices are "i", "i+k", "i+2*k",\n   "i+3*k" and so on, stopping when *j* is reached (but never\n   including *j*).  If *i* or *j* is greater than "len(s)", use\n   "len(s)".  If *i* or *j* are omitted or "None", they become "end"\n   values (which end depends on the sign of *k*).  Note, *k* cannot be\n   zero. If *k* is "None", it is treated like "1".\n\n6. **CPython implementation detail:** If *s* and *t* are both\n   strings, some Python implementations such as CPython can usually\n   perform an in-place optimization for assignments of the form "s = s\n   + t" or "s += t".  When applicable, this optimization makes\n   quadratic run-time much less likely.  This optimization is both\n   version and implementation dependent.  For performance sensitive\n   code, it is preferable to use the "str.join()" method which assures\n   consistent linear concatenation performance across versions and\n   implementations.\n\n   Changed in version 2.4: Formerly, string concatenation never\n   occurred in-place.\n\n\nString Methods\n==============\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support.  Some of them are also available on\n"bytearray" objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange section. To output formatted strings use\ntemplate strings or the "%" operator described in the String\nFormatting Operations section. Also, see the "re" module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n   Return a copy of the string with its first character capitalized\n   and the rest lowercased.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n   Return centered in a string of length *width*. Padding is done\n   using the specified *fillchar* (default is a space).\n\n   Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n   Return the number of non-overlapping occurrences of substring *sub*\n   in the range [*start*, *end*].  
Optional arguments *start* and\n   *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n   Decodes the string using the codec registered for *encoding*.\n   *encoding* defaults to the default string encoding.  *errors* may\n   be given to set a different error handling scheme.  The default is\n   "\'strict\'", meaning that encoding errors raise "UnicodeError".\n   Other possible values are "\'ignore\'", "\'replace\'" and any other\n   name registered via "codecs.register_error()", see section Codec\n   Base Classes.\n\n   New in version 2.2.\n\n   Changed in version 2.3: Support for other error handling schemes\n   added.\n\n   Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n   Return an encoded version of the string.  Default encoding is the\n   current default string encoding.  *errors* may be given to set a\n   different error handling scheme.  The default for *errors* is\n   "\'strict\'", meaning that encoding errors raise a "UnicodeError".\n   Other possible values are "\'ignore\'", "\'replace\'",\n   "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any other name\n   registered via "codecs.register_error()", see section Codec Base\n   Classes. For a list of possible encodings, see section Standard\n   Encodings.\n\n   New in version 2.0.\n\n   Changed in version 2.3: Support for "\'xmlcharrefreplace\'" and\n   "\'backslashreplace\'" and other error handling schemes added.\n\n   Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n   Return "True" if the string ends with the specified *suffix*,\n   otherwise return "False".  *suffix* can also be a tuple of suffixes\n   to look for.  With optional *start*, test beginning at that\n   position.  With optional *end*, stop comparing at that position.\n\n   Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n   Return a copy of the string where all tab characters are replaced\n   by one or more spaces, depending on the current column and the\n   given tab size.  Tab positions occur every *tabsize* characters\n   (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n   To expand the string, the current column is set to zero and the\n   string is examined character by character.  If the character is a\n   tab ("\\t"), one or more space characters are inserted in the result\n   until the current column is equal to the next tab position. (The\n   tab character itself is not copied.)  If the character is a newline\n   ("\\n") or return ("\\r"), it is copied and the current column is\n   reset to zero.  Any other character is copied unchanged and the\n   current column is incremented by one regardless of how the\n   character is represented when printed.\n\n   >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n   \'01      012     0123    01234\'\n   >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n   \'01  012 0123    01234\'\n\nstr.find(sub[, start[, end]])\n\n   Return the lowest index in the string where substring *sub* is\n   found, such that *sub* is contained in the slice "s[start:end]".\n   Optional arguments *start* and *end* are interpreted as in slice\n   notation.  Return "-1" if *sub* is not found.\n\n   Note: The "find()" method should be used only if you need to know\n     the position of *sub*.  
To check if *sub* is a substring or not,\n     use the "in" operator:\n\n        >>> \'Py\' in \'Python\'\n        True\n\nstr.format(*args, **kwargs)\n\n   Perform a string formatting operation.  The string on which this\n   method is called can contain literal text or replacement fields\n   delimited by braces "{}".  Each replacement field contains either\n   the numeric index of a positional argument, or the name of a\n   keyword argument.  Returns a copy of the string where each\n   replacement field is replaced with the string value of the\n   corresponding argument.\n\n   >>> "The sum of 1 + 2 is {0}".format(1+2)\n   \'The sum of 1 + 2 is 3\'\n\n   See Format String Syntax for a description of the various\n   formatting options that can be specified in format strings.\n\n   This method of string formatting is the new standard in Python 3,\n   and should be preferred to the "%" formatting described in String\n   Formatting Operations in new code.\n\n   New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n   Like "find()", but raise "ValueError" when the substring is not\n   found.\n\nstr.isalnum()\n\n   Return true if all characters in the string are alphanumeric and\n   there is at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n   Return true if all characters in the string are alphabetic and\n   there is at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n   Return true if all characters in the string are digits and there is\n   at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n   Return true if all cased characters [4] in the string are lowercase\n   and there is at least one cased character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n   Return true if there are only whitespace characters in the string\n   and there is at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n   Return true if the string is a titlecased string and there is at\n   least one character, for example uppercase characters may only\n   follow uncased characters and lowercase characters only cased ones.\n   Return false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n   Return true if all cased characters [4] in the string are uppercase\n   and there is at least one cased character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n   Return a string which is the concatenation of the strings in the\n   *iterable* *iterable*.  The separator between elements is the\n   string providing this method.\n\nstr.ljust(width[, fillchar])\n\n   Return the string left justified in a string of length *width*.\n   Padding is done using the specified *fillchar* (default is a\n   space).  The original string is returned if *width* is less than or\n   equal to "len(s)".\n\n   Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n   Return a copy of the string with all the cased characters [4]\n   converted to lowercase.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n   Return a copy of the string with leading characters removed.  The\n   *chars* argument is a string specifying the set of characters to be\n   removed.  
If omitted or "None", the *chars* argument defaults to\n   removing whitespace.  The *chars* argument is not a prefix; rather,\n   all combinations of its values are stripped:\n\n   >>> \'   spacious   \'.lstrip()\n   \'spacious   \'\n   >>> \'www.example.com\'.lstrip(\'cmowz.\')\n   \'example.com\'\n\n   Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n   Split the string at the first occurrence of *sep*, and return a\n   3-tuple containing the part before the separator, the separator\n   itself, and the part after the separator.  If the separator is not\n   found, return a 3-tuple containing the string itself, followed by\n   two empty strings.\n\n   New in version 2.5.\n\nstr.replace(old, new[, count])\n\n   Return a copy of the string with all occurrences of substring *old*\n   replaced by *new*.  If the optional argument *count* is given, only\n   the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n   Return the highest index in the string where substring *sub* is\n   found, such that *sub* is contained within "s[start:end]".\n   Optional arguments *start* and *end* are interpreted as in slice\n   notation.  Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n   Like "rfind()" but raises "ValueError" when the substring *sub* is\n   not found.\n\nstr.rjust(width[, fillchar])\n\n   Return the string right justified in a string of length *width*.\n   Padding is done using the specified *fillchar* (default is a\n   space). The original string is returned if *width* is less than or\n   equal to "len(s)".\n\n   Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n   Split the string at the last occurrence of *sep*, and return a\n   3-tuple containing the part before the separator, the separator\n   itself, and the part after the separator.  If the separator is not\n   found, return a 3-tuple containing two empty strings, followed by\n   the string itself.\n\n   New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n   Return a list of the words in the string, using *sep* as the\n   delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n   are done, the *rightmost* ones.  If *sep* is not specified or\n   "None", any whitespace string is a separator.  Except for splitting\n   from the right, "rsplit()" behaves like "split()" which is\n   described in detail below.\n\n   New in version 2.4.\n\nstr.rstrip([chars])\n\n   Return a copy of the string with trailing characters removed.  The\n   *chars* argument is a string specifying the set of characters to be\n   removed.  If omitted or "None", the *chars* argument defaults to\n   removing whitespace.  The *chars* argument is not a suffix; rather,\n   all combinations of its values are stripped:\n\n   >>> \'   spacious   \'.rstrip()\n   \'   spacious\'\n   >>> \'mississippi\'.rstrip(\'ipz\')\n   \'mississ\'\n\n   Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n   Return a list of the words in the string, using *sep* as the\n   delimiter string.  If *maxsplit* is given, at most *maxsplit*\n   splits are done (thus, the list will have at most "maxsplit+1"\n   elements).  If *maxsplit* is not specified or "-1", then there is\n   no limit on the number of splits (all possible splits are made).\n\n   If *sep* is given, consecutive delimiters are not grouped together\n   and are deemed to delimit empty strings (for example,\n   "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']").  
The *sep* argument\n   may consist of multiple characters (for example,\n   "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n   empty string with a specified separator returns "[\'\']".\n\n   If *sep* is not specified or is "None", a different splitting\n   algorithm is applied: runs of consecutive whitespace are regarded\n   as a single separator, and the result will contain no empty strings\n   at the start or end if the string has leading or trailing\n   whitespace.  Consequently, splitting an empty string or a string\n   consisting of just whitespace with a "None" separator returns "[]".\n\n   For example, "\' 1  2   3  \'.split()" returns "[\'1\', \'2\', \'3\']", and\n   "\'  1  2   3  \'.split(None, 1)" returns "[\'1\', \'2   3  \']".\n\nstr.splitlines([keepends])\n\n   Return a list of the lines in the string, breaking at line\n   boundaries. This method uses the *universal newlines* approach to\n   splitting lines. Line breaks are not included in the resulting list\n   unless *keepends* is given and true.\n\n   For example, "\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()" returns "[\'ab\n   c\', \'\', \'de fg\', \'kl\']", while the same call with\n   "splitlines(True)" returns "[\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']".\n\n   Unlike "split()" when a delimiter string *sep* is given, this\n   method returns an empty list for the empty string, and a terminal\n   line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n   Return "True" if string starts with the *prefix*, otherwise return\n   "False". *prefix* can also be a tuple of prefixes to look for.\n   With optional *start*, test string beginning at that position.\n   With optional *end*, stop comparing string at that position.\n\n   Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n   Return a copy of the string with the leading and trailing\n   characters removed. The *chars* argument is a string specifying the\n   set of characters to be removed. If omitted or "None", the *chars*\n   argument defaults to removing whitespace. The *chars* argument is\n   not a prefix or suffix; rather, all combinations of its values are\n   stripped:\n\n   >>> \'   spacious   \'.strip()\n   \'spacious\'\n   >>> \'www.example.com\'.strip(\'cmowz.\')\n   \'example\'\n\n   Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n   Return a copy of the string with uppercase characters converted to\n   lowercase and vice versa.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n   Return a titlecased version of the string where words start with an\n   uppercase character and the remaining characters are lowercase.\n\n   The algorithm uses a simple language-independent definition of a\n   word as groups of consecutive letters.  The definition works in\n   many contexts but it means that apostrophes in contractions and\n   possessives form word boundaries, which may not be the desired\n   result:\n\n      >>> "they\'re bill\'s friends from the UK".title()\n      "They\'Re Bill\'S Friends From The Uk"\n\n   A workaround for apostrophes can be constructed using regular\n   expressions:\n\n      >>> import re\n      >>> def titlecase(s):\n      ...     return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n      ...                   lambda mo: mo.group(0)[0].upper() +\n      ...                              mo.group(0)[1:].lower(),\n      ...                   
s)\n      ...\n      >>> titlecase("they\'re bill\'s friends.")\n      "They\'re Bill\'s Friends."\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n   Return a copy of the string where all characters occurring in the\n   optional argument *deletechars* are removed, and the remaining\n   characters have been mapped through the given translation table,\n   which must be a string of length 256.\n\n   You can use the "maketrans()" helper function in the "string"\n   module to create a translation table. For string objects, set the\n   *table* argument to "None" for translations that only delete\n   characters:\n\n   >>> \'read this short text\'.translate(None, \'aeiou\')\n   \'rd ths shrt txt\'\n\n   New in version 2.6: Support for a "None" *table* argument.\n\n   For Unicode objects, the "translate()" method does not accept the\n   optional *deletechars* argument.  Instead, it returns a copy of the\n   *s* where all characters have been mapped through the given\n   translation table which must be a mapping of Unicode ordinals to\n   Unicode ordinals, Unicode strings or "None". Unmapped characters\n   are left untouched. Characters mapped to "None" are deleted.  Note,\n   a more flexible approach is to create a custom character mapping\n   codec using the "codecs" module (see "encodings.cp1251" for an\n   example).\n\nstr.upper()\n\n   Return a copy of the string with all the cased characters [4]\n   converted to uppercase.  Note that "str.upper().isupper()" might be\n   "False" if "s" contains uncased characters or if the Unicode\n   category of the resulting character(s) is not "Lu" (Letter,\n   uppercase), but e.g. "Lt" (Letter, titlecase).\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n   Return the numeric string left filled with zeros in a string of\n   length *width*.  A sign prefix is handled correctly.  The original\n   string is returned if *width* is less than or equal to "len(s)".\n\n   New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n   Return "True" if there are only numeric characters in S, "False"\n   otherwise. Numeric characters include digit characters, and all\n   characters that have the Unicode numeric value property, e.g.\n   U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n   Return "True" if there are only decimal characters in S, "False"\n   otherwise. Decimal characters include digit characters, and all\n   characters that can be used to form decimal-radix numbers, e.g.\n   U+0660, ARABIC-INDIC DIGIT ZERO.\n\n\nString Formatting Operations\n============================\n\nString and Unicode objects have one unique built-in operation: the "%"\noperator (modulo).  This is also known as the string *formatting* or\n*interpolation* operator.  Given "format % values" (where *format* is\na string or Unicode object), "%" conversion specifications in *format*\nare replaced with zero or more elements of *values*.  The effect is\nsimilar to the using "sprintf()" in the C language.  If *format* is a\nUnicode object, or if any of the objects being converted using the\n"%s" conversion are Unicode objects, the result will also be a Unicode\nobject.\n\nIf *format* requires a single argument, *values* may be a single non-\ntuple object. 
[5]  Otherwise, *values* must be a tuple with exactly\nthe number of items specified by the format string, or a single\nmapping object (for example, a dictionary).\n\nA conversion specifier contains two or more characters and has the\nfollowing components, which must occur in this order:\n\n1. The "\'%\'" character, which marks the start of the specifier.\n\n2. Mapping key (optional), consisting of a parenthesised sequence\n   of characters (for example, "(somename)").\n\n3. Conversion flags (optional), which affect the result of some\n   conversion types.\n\n4. Minimum field width (optional).  If specified as an "\'*\'"\n   (asterisk), the actual width is read from the next element of the\n   tuple in *values*, and the object to convert comes after the\n   minimum field width and optional precision.\n\n5. Precision (optional), given as a "\'.\'" (dot) followed by the\n   precision.  If specified as "\'*\'" (an asterisk), the actual width\n   is read from the next element of the tuple in *values*, and the\n   value to convert comes after the precision.\n\n6. Length modifier (optional).\n\n7. Conversion type.\n\nWhen the right argument is a dictionary (or other mapping type), then\nthe formats in the string *must* include a parenthesised mapping key\ninto that dictionary inserted immediately after the "\'%\'" character.\nThe mapping key selects the value to be formatted from the mapping.\nFor example:\n\n>>> print \'%(language)s has %(number)03d quote types.\' % \\\n...       {"language": "Python", "number": 2}\nPython has 002 quote types.\n\nIn this case no "*" specifiers may occur in a format (since they\nrequire a sequential parameter list).\n\nThe conversion flag characters are:\n\n+-----------+-----------------------------------------------------------------------+\n| Flag      | Meaning                                                               |\n+===========+=======================================================================+\n| "\'#\'"     | The value conversion will use the "alternate form" (where defined     |\n|           | below).                                                               |\n+-----------+-----------------------------------------------------------------------+\n| "\'0\'"     | The conversion will be zero padded for numeric values.                |\n+-----------+-----------------------------------------------------------------------+\n| "\'-\'"     | The converted value is left adjusted (overrides the "\'0\'" conversion  |\n|           | if both are given).                                                   |\n+-----------+-----------------------------------------------------------------------+\n| "\' \'"     | (a space) A blank should be left before a positive number (or empty   |\n|           | string) produced by a signed conversion.                              |\n+-----------+-----------------------------------------------------------------------+\n| "\'+\'"     | A sign character ("\'+\'" or "\'-\'") will precede the conversion         |\n|           | (overrides a "space" flag).                                           |\n+-----------+-----------------------------------------------------------------------+\n\nA length modifier ("h", "l", or "L") may be present, but is ignored as\nit is not necessary for Python -- so e.g. 
"%ld" is identical to "%d".\n\nThe conversion types are:\n\n+--------------+-------------------------------------------------------+---------+\n| Conversion   | Meaning                                               | Notes   |\n+==============+=======================================================+=========+\n| "\'d\'"        | Signed integer decimal.                               |         |\n+--------------+-------------------------------------------------------+---------+\n| "\'i\'"        | Signed integer decimal.                               |         |\n+--------------+-------------------------------------------------------+---------+\n| "\'o\'"        | Signed octal value.                                   | (1)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'u\'"        | Obsolete type -- it is identical to "\'d\'".            | (7)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'x\'"        | Signed hexadecimal (lowercase).                       | (2)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'X\'"        | Signed hexadecimal (uppercase).                       | (2)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'e\'"        | Floating point exponential format (lowercase).        | (3)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'E\'"        | Floating point exponential format (uppercase).        | (3)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'f\'"        | Floating point decimal format.                        | (3)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'F\'"        | Floating point decimal format.                        | (3)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'g\'"        | Floating point format. Uses lowercase exponential     | (4)     |\n|              | format if exponent is less than -4 or not less than   |         |\n|              | precision, decimal format otherwise.                  |         |\n+--------------+-------------------------------------------------------+---------+\n| "\'G\'"        | Floating point format. Uses uppercase exponential     | (4)     |\n|              | format if exponent is less than -4 or not less than   |         |\n|              | precision, decimal format otherwise.                  |         |\n+--------------+-------------------------------------------------------+---------+\n| "\'c\'"        | Single character (accepts integer or single character |         |\n|              | string).                                              |         |\n+--------------+-------------------------------------------------------+---------+\n| "\'r\'"        | String (converts any Python object using repr()).     | (5)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'s\'"        | String (converts any Python object using "str()").    | (6)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'%\'"        | No argument is converted, results in a "\'%\'"          |         |\n|              | character in the result.                              |         |\n+--------------+-------------------------------------------------------+---------+\n\nNotes:\n\n1. 
The alternate form causes a leading zero ("\'0\'") to be inserted\n   between left-hand padding and the formatting of the number if the\n   leading character of the result is not already a zero.\n\n2. The alternate form causes a leading "\'0x\'" or "\'0X\'" (depending\n   on whether the "\'x\'" or "\'X\'" format was used) to be inserted\n   between left-hand padding and the formatting of the number if the\n   leading character of the result is not already a zero.\n\n3. The alternate form causes the result to always contain a decimal\n   point, even if no digits follow it.\n\n   The precision determines the number of digits after the decimal\n   point and defaults to 6.\n\n4. The alternate form causes the result to always contain a decimal\n   point, and trailing zeroes are not removed as they would otherwise\n   be.\n\n   The precision determines the number of significant digits before\n   and after the decimal point and defaults to 6.\n\n5. The "%r" conversion was added in Python 2.0.\n\n   The precision determines the maximal number of characters used.\n\n6. If the object or format provided is a "unicode" string, the\n   resulting string will also be "unicode".\n\n   The precision determines the maximal number of characters used.\n\n7. See **PEP 237**.\n\nSince Python strings have an explicit length, "%s" conversions do not\nassume that "\'\\0\'" is the end of the string.\n\nChanged in version 2.7: "%f" conversions for numbers whose absolute\nvalue is over 1e50 are no longer replaced by "%g" conversions.\n\nAdditional string operations are defined in standard modules "string"\nand "re".\n\n\nXRange Type\n===========\n\nThe "xrange" type is an immutable sequence which is commonly used for\nlooping.  The advantage of the "xrange" type is that an "xrange"\nobject will always take the same amount of memory, no matter the size\nof the range it represents.  There are no consistent performance\nadvantages.\n\nXRange objects have very little behavior: they only support indexing,\niteration, and the "len()" function.\n\n\nMutable Sequence Types\n======================\n\nList and "bytearray" objects support additional operations that allow\nin-place modification of the object. Other mutable sequence types\n(when added to the language) should also support these operations.\nStrings and tuples are immutable sequence types: such objects cannot\nbe modified once created. 
The following operations are defined on\nmutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation                      | Result                           | Notes                 |\n+================================+==================================+=======================+\n| "s[i] = x"                     | item *i* of *s* is replaced by   |                       |\n|                                | *x*                              |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t"                   | slice of *s* from *i* to *j* is  |                       |\n|                                | replaced by the contents of the  |                       |\n|                                | iterable *t*                     |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]"                   | same as "s[i:j] = []"            |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t"                 | the elements of "s[i:j:k]" are   | (1)                   |\n|                                | replaced by those of *t*         |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]"                 | removes the elements of          |                       |\n|                                | "s[i:j:k]" from the list         |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)"                  | same as "s[len(s):len(s)] = [x]" | (2)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(x)"                  | same as "s[len(s):len(s)] = x"   | (3)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.count(x)"                   | return number of *i*\'s for which |                       |\n|                                | "s[i] == x"                      |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.index(x[, i[, j]])"         | return smallest *k* such that    | (4)                   |\n|                                | "s[k] == x" and "i <= k < j"     |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)"               | same as "s[i:i] = [x]"           | (5)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])"                   | same as "x = s[i]; del s[i];     | (6)                   |\n|                                | return x"                        |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)"                  | same as "del s[s.index(x)]"      | (4)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()"                  | reverses the items of *s* in     | (7)                   |\n|                              
  | place                            |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.sort([cmp[, key[,           | sort the items of *s* in place   | (7)(8)(9)(10)         |\n| reverse]]])"                   |                                  |                       |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is  replacing.\n\n2. The C implementation of Python has historically accepted\n   multiple parameters and implicitly joined them into a tuple; this\n   no longer works in Python 2.0.  Use of this misfeature has been\n   deprecated since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises "ValueError" when *x* is not found in *s*. When a\n   negative index is passed as the second or third parameter to the\n   "index()" method, the list length is added, as for slice indices.\n   If it is still negative, it is truncated to zero, as for slice\n   indices.\n\n   Changed in version 2.3: Previously, "index()" didn\'t have arguments\n   for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n   "insert()" method, the list length is added, as for slice indices.\n   If it is still negative, it is truncated to zero, as for slice\n   indices.\n\n   Changed in version 2.3: Previously, all negative indices were\n   truncated to zero.\n\n6. The "pop()" method\'s optional argument *i* defaults to "-1", so\n   that by default the last item is removed and returned.\n\n7. The "sort()" and "reverse()" methods modify the list in place\n   for economy of space when sorting or reversing a large list.  To\n   remind you that they operate by side effect, they don\'t return the\n   sorted or reversed list.\n\n8. The "sort()" method takes optional arguments for controlling the\n   comparisons.\n\n   *cmp* specifies a custom comparison function of two arguments (list\n   items) which should return a negative, zero or positive number\n   depending on whether the first argument is considered smaller than,\n   equal to, or larger than the second argument: "cmp=lambda x,y:\n   cmp(x.lower(), y.lower())".  The default value is "None".\n\n   *key* specifies a function of one argument that is used to extract\n   a comparison key from each list element: "key=str.lower".  The\n   default value is "None".\n\n   *reverse* is a boolean value.  If set to "True", then the list\n   elements are sorted as if each comparison were reversed.\n\n   In general, the *key* and *reverse* conversion processes are much\n   faster than specifying an equivalent *cmp* function.  This is\n   because *cmp* is called multiple times for each list element while\n   *key* and *reverse* touch each element only once.  Use\n   "functools.cmp_to_key()" to convert an old-style *cmp* function to\n   a *key* function.\n\n   Changed in version 2.3: Support for "None" as an equivalent to\n   omitting *cmp* was added.\n\n   Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the "sort()" method is guaranteed to\n   be stable.  A sort is stable if it guarantees not to change the\n   relative order of elements that compare equal --- this is helpful\n   for sorting in multiple passes (for example, sort by department,\n   then by salary grade).\n\n10. 
**CPython implementation detail:** While a list is being\n    sorted, the effect of attempting to mutate, or even inspect, the\n    list is undefined.  The C implementation of Python 2.3 and newer\n    makes the list appear empty for the duration, and raises\n    "ValueError" if it can detect that the list has been mutated\n    during a sort.\n',
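A minimal doctest-style sketch of notes 8 and 9 above, assuming Python 2.7 with "functools" available; the "records" list and the lambdas here are invented purely for illustration and are not part of the pydoc data:

   >>> import functools
   >>> records = [('sales', 'Ann'), ('dev', 'Bob'), ('sales', 'Cid')]
   >>> records.sort(key=lambda rec: rec[1])   # first pass: sort by name
   >>> records.sort(key=lambda rec: rec[0])   # second pass: by department; the stable sort keeps the name order within each department
   >>> records
   [('dev', 'Bob'), ('sales', 'Ann'), ('sales', 'Cid')]
   >>> old_cmp = lambda x, y: cmp(x.lower(), y.lower())
   >>> sorted(['b', 'A', 'c'], key=functools.cmp_to_key(old_cmp))   # old-style *cmp* converted to a *key* function
   ['A', 'b', 'c']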
- 'typesseq-mutable': u'\nMutable Sequence Types\n**********************\n\nList and "bytearray" objects support additional operations that allow\nin-place modification of the object. Other mutable sequence types\n(when added to the language) should also support these operations.\nStrings and tuples are immutable sequence types: such objects cannot\nbe modified once created. The following operations are defined on\nmutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation                      | Result                           | Notes                 |\n+================================+==================================+=======================+\n| "s[i] = x"                     | item *i* of *s* is replaced by   |                       |\n|                                | *x*                              |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t"                   | slice of *s* from *i* to *j* is  |                       |\n|                                | replaced by the contents of the  |                       |\n|                                | iterable *t*                     |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]"                   | same as "s[i:j] = []"            |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t"                 | the elements of "s[i:j:k]" are   | (1)                   |\n|                                | replaced by those of *t*         |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]"                 | removes the elements of          |                       |\n|                                | "s[i:j:k]" from the list         |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)"                  | same as "s[len(s):len(s)] = [x]" | (2)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(x)"                  | same as "s[len(s):len(s)] = x"   | (3)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.count(x)"                   | return number of *i*\'s for which |                       |\n|                                | "s[i] == x"                      |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.index(x[, i[, j]])"         | return smallest *k* such that    | (4)                   |\n|                                | "s[k] == x" and "i <= k < j"     |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)"               | same as "s[i:i] = [x]"           | (5)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])"                   | same as "x = s[i]; del s[i];     | (6)                   |\n|                                | return x"                        |                       
|\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)"                  | same as "del s[s.index(x)]"      | (4)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()"                  | reverses the items of *s* in     | (7)                   |\n|                                | place                            |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.sort([cmp[, key[,           | sort the items of *s* in place   | (7)(8)(9)(10)         |\n| reverse]]])"                   |                                  |                       |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is  replacing.\n\n2. The C implementation of Python has historically accepted\n   multiple parameters and implicitly joined them into a tuple; this\n   no longer works in Python 2.0.  Use of this misfeature has been\n   deprecated since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises "ValueError" when *x* is not found in *s*. When a\n   negative index is passed as the second or third parameter to the\n   "index()" method, the list length is added, as for slice indices.\n   If it is still negative, it is truncated to zero, as for slice\n   indices.\n\n   Changed in version 2.3: Previously, "index()" didn\'t have arguments\n   for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n   "insert()" method, the list length is added, as for slice indices.\n   If it is still negative, it is truncated to zero, as for slice\n   indices.\n\n   Changed in version 2.3: Previously, all negative indices were\n   truncated to zero.\n\n6. The "pop()" method\'s optional argument *i* defaults to "-1", so\n   that by default the last item is removed and returned.\n\n7. The "sort()" and "reverse()" methods modify the list in place\n   for economy of space when sorting or reversing a large list.  To\n   remind you that they operate by side effect, they don\'t return the\n   sorted or reversed list.\n\n8. The "sort()" method takes optional arguments for controlling the\n   comparisons.\n\n   *cmp* specifies a custom comparison function of two arguments (list\n   items) which should return a negative, zero or positive number\n   depending on whether the first argument is considered smaller than,\n   equal to, or larger than the second argument: "cmp=lambda x,y:\n   cmp(x.lower(), y.lower())".  The default value is "None".\n\n   *key* specifies a function of one argument that is used to extract\n   a comparison key from each list element: "key=str.lower".  The\n   default value is "None".\n\n   *reverse* is a boolean value.  If set to "True", then the list\n   elements are sorted as if each comparison were reversed.\n\n   In general, the *key* and *reverse* conversion processes are much\n   faster than specifying an equivalent *cmp* function.  This is\n   because *cmp* is called multiple times for each list element while\n   *key* and *reverse* touch each element only once.  Use\n   "functools.cmp_to_key()" to convert an old-style *cmp* function to\n   a *key* function.\n\n   Changed in version 2.3: Support for "None" as an equivalent to\n   omitting *cmp* was added.\n\n   Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. 
Starting with Python 2.3, the "sort()" method is guaranteed to\n   be stable.  A sort is stable if it guarantees not to change the\n   relative order of elements that compare equal --- this is helpful\n   for sorting in multiple passes (for example, sort by department,\n   then by salary grade).\n\n10. **CPython implementation detail:** While a list is being\n    sorted, the effect of attempting to mutate, or even inspect, the\n    list is undefined.  The C implementation of Python 2.3 and newer\n    makes the list appear empty for the duration, and raises\n    "ValueError" if it can detect that the list has been mutated\n    during a sort.\n',
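A small doctest-style sketch of notes 5-7 above (negative index handling for "insert()", the default argument of "pop()", and the in-place, "None"-returning behaviour of "reverse()"); the list contents are invented for illustration only:

   >>> s = [1, 2, 3]
   >>> s.insert(-1, 99)    # negative index: len(s) is added, so this inserts at position 2
   >>> s
   [1, 2, 99, 3]
   >>> s.pop()             # i defaults to -1, so the last item is removed and returned
   3
   >>> s.reverse()         # reverses in place; returns None rather than the list
   >>> s
   [99, 2, 1]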
+ 'typesseq': u'\nSequence Types --- "str", "unicode", "list", "tuple", "bytearray", "buffer", "xrange"\n*************************************************************************************\n\nThere are seven sequence types: strings, Unicode strings, lists,\ntuples, bytearrays, buffers, and xrange objects.\n\nFor other containers see the built in "dict" and "set" classes, and\nthe "collections" module.\n\nString literals are written in single or double quotes: "\'xyzzy\'",\n""frobozz"".  See String literals for more about string literals.\nUnicode strings are much like strings, but are specified in the syntax\nusing a preceding "\'u\'" character: "u\'abc\'", "u"def"". In addition to\nthe functionality described here, there are also string-specific\nmethods described in the String Methods section. Lists are constructed\nwith square brackets, separating items with commas: "[a, b, c]".\nTuples are constructed by the comma operator (not within square\nbrackets), with or without enclosing parentheses, but an empty tuple\nmust have the enclosing parentheses, such as "a, b, c" or "()".  A\nsingle item tuple must have a trailing comma, such as "(d,)".\n\nBytearray objects are created with the built-in function\n"bytearray()".\n\nBuffer objects are not directly supported by Python syntax, but can be\ncreated by calling the built-in function "buffer()".  They don\'t\nsupport concatenation or repetition.\n\nObjects of type xrange are similar to buffers in that there is no\nspecific syntax to create them, but they are created using the\n"xrange()" function.  They don\'t support slicing, concatenation or\nrepetition, and using "in", "not in", "min()" or "max()" on them is\ninefficient.\n\nMost sequence types support the following operations.  The "in" and\n"not in" operations have the same priorities as the comparison\noperations.  The "+" and "*" operations have the same priority as the\ncorresponding numeric operations. 
[3] Additional methods are provided\nfor Mutable Sequence Types.\n\nThis table lists the sequence operations sorted in ascending priority.\nIn the table, *s* and *t* are sequences of the same type; *n*, *i* and\n*j* are integers:\n\n+--------------------+----------------------------------+------------+\n| Operation          | Result                           | Notes      |\n+====================+==================================+============+\n| "x in s"           | "True" if an item of *s* is      | (1)        |\n|                    | equal to *x*, else "False"       |            |\n+--------------------+----------------------------------+------------+\n| "x not in s"       | "False" if an item of *s* is     | (1)        |\n|                    | equal to *x*, else "True"        |            |\n+--------------------+----------------------------------+------------+\n| "s + t"            | the concatenation of *s* and *t* | (6)        |\n+--------------------+----------------------------------+------------+\n| "s * n, n * s"     | equivalent to adding *s* to      | (2)        |\n|                    | itself *n* times                 |            |\n+--------------------+----------------------------------+------------+\n| "s[i]"             | *i*th item of *s*, origin 0      | (3)        |\n+--------------------+----------------------------------+------------+\n| "s[i:j]"           | slice of *s* from *i* to *j*     | (3)(4)     |\n+--------------------+----------------------------------+------------+\n| "s[i:j:k]"         | slice of *s* from *i* to *j*     | (3)(5)     |\n|                    | with step *k*                    |            |\n+--------------------+----------------------------------+------------+\n| "len(s)"           | length of *s*                    |            |\n+--------------------+----------------------------------+------------+\n| "min(s)"           | smallest item of *s*             |            |\n+--------------------+----------------------------------+------------+\n| "max(s)"           | largest item of *s*              |            |\n+--------------------+----------------------------------+------------+\n| "s.index(x)"       | index of the first occurrence of |            |\n|                    | *x* in *s*                       |            |\n+--------------------+----------------------------------+------------+\n| "s.count(x)"       | total number of occurrences of   |            |\n|                    | *x* in *s*                       |            |\n+--------------------+----------------------------------+------------+\n\nSequence types also support comparisons. In particular, tuples and\nlists are compared lexicographically by comparing corresponding\nelements. This means that to compare equal, every element must compare\nequal and the two sequences must be of the same type and have the same\nlength. (For full details see Comparisons in the language reference.)\n\nNotes:\n\n1. When *s* is a string or Unicode string object the "in" and "not\n   in" operations act like a substring test.  In Python versions\n   before 2.3, *x* had to be a string of length 1. In Python 2.3 and\n   beyond, *x* may be a string of any length.\n\n2. Values of *n* less than "0" are treated as "0" (which yields an\n   empty sequence of the same type as *s*).  
Note that items in the\n   sequence *s* are not copied; they are referenced multiple times.\n   This often haunts new Python programmers; consider:\n\n   >>> lists = [[]] * 3\n   >>> lists\n   [[], [], []]\n   >>> lists[0].append(3)\n   >>> lists\n   [[3], [3], [3]]\n\n   What has happened is that "[[]]" is a one-element list containing\n   an empty list, so all three elements of "[[]] * 3" are references\n   to this single empty list.  Modifying any of the elements of\n   "lists" modifies this single list. You can create a list of\n   different lists this way:\n\n   >>> lists = [[] for i in range(3)]\n   >>> lists[0].append(3)\n   >>> lists[1].append(5)\n   >>> lists[2].append(7)\n   >>> lists\n   [[3], [5], [7]]\n\n   Further explanation is available in the FAQ entry How do I create a\n   multidimensional list?.\n\n3. If *i* or *j* is negative, the index is relative to the end of\n   the string: "len(s) + i" or "len(s) + j" is substituted.  But note\n   that "-0" is still "0".\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n   items with index *k* such that "i <= k < j".  If *i* or *j* is\n   greater than "len(s)", use "len(s)".  If *i* is omitted or "None",\n   use "0".  If *j* is omitted or "None", use "len(s)".  If *i* is\n   greater than or equal to *j*, the slice is empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n   sequence of items with index  "x = i + n*k" such that "0 <= n <\n   (j-i)/k".  In other words, the indices are "i", "i+k", "i+2*k",\n   "i+3*k" and so on, stopping when *j* is reached (but never\n   including *j*).  If *i* or *j* is greater than "len(s)", use\n   "len(s)".  If *i* or *j* are omitted or "None", they become "end"\n   values (which end depends on the sign of *k*).  Note, *k* cannot be\n   zero. If *k* is "None", it is treated like "1".\n\n6. **CPython implementation detail:** If *s* and *t* are both\n   strings, some Python implementations such as CPython can usually\n   perform an in-place optimization for assignments of the form "s = s\n   + t" or "s += t".  When applicable, this optimization makes\n   quadratic run-time much less likely.  This optimization is both\n   version and implementation dependent.  For performance sensitive\n   code, it is preferable to use the "str.join()" method which assures\n   consistent linear concatenation performance across versions and\n   implementations.\n\n   Changed in version 2.4: Formerly, string concatenation never\n   occurred in-place.\n\n\nString Methods\n==============\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support.  Some of them are also available on\n"bytearray" objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange section. To output formatted strings use\ntemplate strings or the "%" operator described in the String\nFormatting Operations section. Also, see the "re" module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n   Return a copy of the string with its first character capitalized\n   and the rest lowercased.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n   Return centered in a string of length *width*. 
Padding is done\n   using the specified *fillchar* (default is a space).\n\n   Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n   Return the number of non-overlapping occurrences of substring *sub*\n   in the range [*start*, *end*].  Optional arguments *start* and\n   *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n   Decodes the string using the codec registered for *encoding*.\n   *encoding* defaults to the default string encoding.  *errors* may\n   be given to set a different error handling scheme.  The default is\n   "\'strict\'", meaning that encoding errors raise "UnicodeError".\n   Other possible values are "\'ignore\'", "\'replace\'" and any other\n   name registered via "codecs.register_error()", see section Codec\n   Base Classes.\n\n   New in version 2.2.\n\n   Changed in version 2.3: Support for other error handling schemes\n   added.\n\n   Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n   Return an encoded version of the string.  Default encoding is the\n   current default string encoding.  *errors* may be given to set a\n   different error handling scheme.  The default for *errors* is\n   "\'strict\'", meaning that encoding errors raise a "UnicodeError".\n   Other possible values are "\'ignore\'", "\'replace\'",\n   "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any other name\n   registered via "codecs.register_error()", see section Codec Base\n   Classes. For a list of possible encodings, see section Standard\n   Encodings.\n\n   New in version 2.0.\n\n   Changed in version 2.3: Support for "\'xmlcharrefreplace\'" and\n   "\'backslashreplace\'" and other error handling schemes added.\n\n   Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n   Return "True" if the string ends with the specified *suffix*,\n   otherwise return "False".  *suffix* can also be a tuple of suffixes\n   to look for.  With optional *start*, test beginning at that\n   position.  With optional *end*, stop comparing at that position.\n\n   Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n   Return a copy of the string where all tab characters are replaced\n   by one or more spaces, depending on the current column and the\n   given tab size.  Tab positions occur every *tabsize* characters\n   (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n   To expand the string, the current column is set to zero and the\n   string is examined character by character.  If the character is a\n   tab ("\\t"), one or more space characters are inserted in the result\n   until the current column is equal to the next tab position. (The\n   tab character itself is not copied.)  If the character is a newline\n   ("\\n") or return ("\\r"), it is copied and the current column is\n   reset to zero.  Any other character is copied unchanged and the\n   current column is incremented by one regardless of how the\n   character is represented when printed.\n\n   >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n   \'01      012     0123    01234\'\n   >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n   \'01  012 0123    01234\'\n\nstr.find(sub[, start[, end]])\n\n   Return the lowest index in the string where substring *sub* is\n   found within the slice "s[start:end]".  Optional arguments *start*\n   and *end* are interpreted as in slice notation.  
Return "-1" if\n   *sub* is not found.\n\n   Note: The "find()" method should be used only if you need to know\n     the position of *sub*.  To check if *sub* is a substring or not,\n     use the "in" operator:\n\n        >>> \'Py\' in \'Python\'\n        True\n\nstr.format(*args, **kwargs)\n\n   Perform a string formatting operation.  The string on which this\n   method is called can contain literal text or replacement fields\n   delimited by braces "{}".  Each replacement field contains either\n   the numeric index of a positional argument, or the name of a\n   keyword argument.  Returns a copy of the string where each\n   replacement field is replaced with the string value of the\n   corresponding argument.\n\n   >>> "The sum of 1 + 2 is {0}".format(1+2)\n   \'The sum of 1 + 2 is 3\'\n\n   See Format String Syntax for a description of the various\n   formatting options that can be specified in format strings.\n\n   This method of string formatting is the new standard in Python 3,\n   and should be preferred to the "%" formatting described in String\n   Formatting Operations in new code.\n\n   New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n   Like "find()", but raise "ValueError" when the substring is not\n   found.\n\nstr.isalnum()\n\n   Return true if all characters in the string are alphanumeric and\n   there is at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n   Return true if all characters in the string are alphabetic and\n   there is at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n   Return true if all characters in the string are digits and there is\n   at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n   Return true if all cased characters [4] in the string are lowercase\n   and there is at least one cased character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n   Return true if there are only whitespace characters in the string\n   and there is at least one character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n   Return true if the string is a titlecased string and there is at\n   least one character, for example uppercase characters may only\n   follow uncased characters and lowercase characters only cased ones.\n   Return false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n   Return true if all cased characters [4] in the string are uppercase\n   and there is at least one cased character, false otherwise.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n   Return a string which is the concatenation of the strings in the\n   *iterable* *iterable*.  The separator between elements is the\n   string providing this method.\n\nstr.ljust(width[, fillchar])\n\n   Return the string left justified in a string of length *width*.\n   Padding is done using the specified *fillchar* (default is a\n   space).  
The original string is returned if *width* is less than or\n   equal to "len(s)".\n\n   Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n   Return a copy of the string with all the cased characters [4]\n   converted to lowercase.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n   Return a copy of the string with leading characters removed.  The\n   *chars* argument is a string specifying the set of characters to be\n   removed.  If omitted or "None", the *chars* argument defaults to\n   removing whitespace.  The *chars* argument is not a prefix; rather,\n   all combinations of its values are stripped:\n\n   >>> \'   spacious   \'.lstrip()\n   \'spacious   \'\n   >>> \'www.example.com\'.lstrip(\'cmowz.\')\n   \'example.com\'\n\n   Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n   Split the string at the first occurrence of *sep*, and return a\n   3-tuple containing the part before the separator, the separator\n   itself, and the part after the separator.  If the separator is not\n   found, return a 3-tuple containing the string itself, followed by\n   two empty strings.\n\n   New in version 2.5.\n\nstr.replace(old, new[, count])\n\n   Return a copy of the string with all occurrences of substring *old*\n   replaced by *new*.  If the optional argument *count* is given, only\n   the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n   Return the highest index in the string where substring *sub* is\n   found, such that *sub* is contained within "s[start:end]".\n   Optional arguments *start* and *end* are interpreted as in slice\n   notation.  Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n   Like "rfind()" but raises "ValueError" when the substring *sub* is\n   not found.\n\nstr.rjust(width[, fillchar])\n\n   Return the string right justified in a string of length *width*.\n   Padding is done using the specified *fillchar* (default is a\n   space). The original string is returned if *width* is less than or\n   equal to "len(s)".\n\n   Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n   Split the string at the last occurrence of *sep*, and return a\n   3-tuple containing the part before the separator, the separator\n   itself, and the part after the separator.  If the separator is not\n   found, return a 3-tuple containing two empty strings, followed by\n   the string itself.\n\n   New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n   Return a list of the words in the string, using *sep* as the\n   delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n   are done, the *rightmost* ones.  If *sep* is not specified or\n   "None", any whitespace string is a separator.  Except for splitting\n   from the right, "rsplit()" behaves like "split()" which is\n   described in detail below.\n\n   New in version 2.4.\n\nstr.rstrip([chars])\n\n   Return a copy of the string with trailing characters removed.  The\n   *chars* argument is a string specifying the set of characters to be\n   removed.  If omitted or "None", the *chars* argument defaults to\n   removing whitespace.  
The *chars* argument is not a suffix; rather,\n   all combinations of its values are stripped:\n\n   >>> \'   spacious   \'.rstrip()\n   \'   spacious\'\n   >>> \'mississippi\'.rstrip(\'ipz\')\n   \'mississ\'\n\n   Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n   Return a list of the words in the string, using *sep* as the\n   delimiter string.  If *maxsplit* is given, at most *maxsplit*\n   splits are done (thus, the list will have at most "maxsplit+1"\n   elements).  If *maxsplit* is not specified or "-1", then there is\n   no limit on the number of splits (all possible splits are made).\n\n   If *sep* is given, consecutive delimiters are not grouped together\n   and are deemed to delimit empty strings (for example,\n   "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']").  The *sep* argument\n   may consist of multiple characters (for example,\n   "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n   empty string with a specified separator returns "[\'\']".\n\n   If *sep* is not specified or is "None", a different splitting\n   algorithm is applied: runs of consecutive whitespace are regarded\n   as a single separator, and the result will contain no empty strings\n   at the start or end if the string has leading or trailing\n   whitespace.  Consequently, splitting an empty string or a string\n   consisting of just whitespace with a "None" separator returns "[]".\n\n   For example, "\' 1  2   3  \'.split()" returns "[\'1\', \'2\', \'3\']", and\n   "\'  1  2   3  \'.split(None, 1)" returns "[\'1\', \'2   3  \']".\n\nstr.splitlines([keepends])\n\n   Return a list of the lines in the string, breaking at line\n   boundaries. This method uses the *universal newlines* approach to\n   splitting lines. Line breaks are not included in the resulting list\n   unless *keepends* is given and true.\n\n   For example, "\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()" returns "[\'ab\n   c\', \'\', \'de fg\', \'kl\']", while the same call with\n   "splitlines(True)" returns "[\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']".\n\n   Unlike "split()" when a delimiter string *sep* is given, this\n   method returns an empty list for the empty string, and a terminal\n   line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n   Return "True" if string starts with the *prefix*, otherwise return\n   "False". *prefix* can also be a tuple of prefixes to look for.\n   With optional *start*, test string beginning at that position.\n   With optional *end*, stop comparing string at that position.\n\n   Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n   Return a copy of the string with the leading and trailing\n   characters removed. The *chars* argument is a string specifying the\n   set of characters to be removed. If omitted or "None", the *chars*\n   argument defaults to removing whitespace. 
The *chars* argument is\n   not a prefix or suffix; rather, all combinations of its values are\n   stripped:\n\n   >>> \'   spacious   \'.strip()\n   \'spacious\'\n   >>> \'www.example.com\'.strip(\'cmowz.\')\n   \'example\'\n\n   Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n   Return a copy of the string with uppercase characters converted to\n   lowercase and vice versa.\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n   Return a titlecased version of the string where words start with an\n   uppercase character and the remaining characters are lowercase.\n\n   The algorithm uses a simple language-independent definition of a\n   word as groups of consecutive letters.  The definition works in\n   many contexts but it means that apostrophes in contractions and\n   possessives form word boundaries, which may not be the desired\n   result:\n\n      >>> "they\'re bill\'s friends from the UK".title()\n      "They\'Re Bill\'S Friends From The Uk"\n\n   A workaround for apostrophes can be constructed using regular\n   expressions:\n\n      >>> import re\n      >>> def titlecase(s):\n      ...     return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n      ...                   lambda mo: mo.group(0)[0].upper() +\n      ...                              mo.group(0)[1:].lower(),\n      ...                   s)\n      ...\n      >>> titlecase("they\'re bill\'s friends.")\n      "They\'re Bill\'s Friends."\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n   Return a copy of the string where all characters occurring in the\n   optional argument *deletechars* are removed, and the remaining\n   characters have been mapped through the given translation table,\n   which must be a string of length 256.\n\n   You can use the "maketrans()" helper function in the "string"\n   module to create a translation table. For string objects, set the\n   *table* argument to "None" for translations that only delete\n   characters:\n\n   >>> \'read this short text\'.translate(None, \'aeiou\')\n   \'rd ths shrt txt\'\n\n   New in version 2.6: Support for a "None" *table* argument.\n\n   For Unicode objects, the "translate()" method does not accept the\n   optional *deletechars* argument.  Instead, it returns a copy of the\n   *s* where all characters have been mapped through the given\n   translation table which must be a mapping of Unicode ordinals to\n   Unicode ordinals, Unicode strings or "None". Unmapped characters\n   are left untouched. Characters mapped to "None" are deleted.  Note,\n   a more flexible approach is to create a custom character mapping\n   codec using the "codecs" module (see "encodings.cp1251" for an\n   example).\n\nstr.upper()\n\n   Return a copy of the string with all the cased characters [4]\n   converted to uppercase.  Note that "str.upper().isupper()" might be\n   "False" if "s" contains uncased characters or if the Unicode\n   category of the resulting character(s) is not "Lu" (Letter,\n   uppercase), but e.g. "Lt" (Letter, titlecase).\n\n   For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n   Return the numeric string left filled with zeros in a string of\n   length *width*.  A sign prefix is handled correctly.  
The original\n   string is returned if *width* is less than or equal to "len(s)".\n\n   New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n   Return "True" if there are only numeric characters in S, "False"\n   otherwise. Numeric characters include digit characters, and all\n   characters that have the Unicode numeric value property, e.g.\n   U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n   Return "True" if there are only decimal characters in S, "False"\n   otherwise. Decimal characters include digit characters, and all\n   characters that can be used to form decimal-radix numbers, e.g.\n   U+0660, ARABIC-INDIC DIGIT ZERO.\n\n\nString Formatting Operations\n============================\n\nString and Unicode objects have one unique built-in operation: the "%"\noperator (modulo).  This is also known as the string *formatting* or\n*interpolation* operator.  Given "format % values" (where *format* is\na string or Unicode object), "%" conversion specifications in *format*\nare replaced with zero or more elements of *values*.  The effect is\nsimilar to the using "sprintf()" in the C language.  If *format* is a\nUnicode object, or if any of the objects being converted using the\n"%s" conversion are Unicode objects, the result will also be a Unicode\nobject.\n\nIf *format* requires a single argument, *values* may be a single non-\ntuple object. [5]  Otherwise, *values* must be a tuple with exactly\nthe number of items specified by the format string, or a single\nmapping object (for example, a dictionary).\n\nA conversion specifier contains two or more characters and has the\nfollowing components, which must occur in this order:\n\n1. The "\'%\'" character, which marks the start of the specifier.\n\n2. Mapping key (optional), consisting of a parenthesised sequence\n   of characters (for example, "(somename)").\n\n3. Conversion flags (optional), which affect the result of some\n   conversion types.\n\n4. Minimum field width (optional).  If specified as an "\'*\'"\n   (asterisk), the actual width is read from the next element of the\n   tuple in *values*, and the object to convert comes after the\n   minimum field width and optional precision.\n\n5. Precision (optional), given as a "\'.\'" (dot) followed by the\n   precision.  If specified as "\'*\'" (an asterisk), the actual width\n   is read from the next element of the tuple in *values*, and the\n   value to convert comes after the precision.\n\n6. Length modifier (optional).\n\n7. Conversion type.\n\nWhen the right argument is a dictionary (or other mapping type), then\nthe formats in the string *must* include a parenthesised mapping key\ninto that dictionary inserted immediately after the "\'%\'" character.\nThe mapping key selects the value to be formatted from the mapping.\nFor example:\n\n>>> print \'%(language)s has %(number)03d quote types.\' % \\\n...       {"language": "Python", "number": 2}\nPython has 002 quote types.\n\nIn this case no "*" specifiers may occur in a format (since they\nrequire a sequential parameter list).\n\nThe conversion flag characters are:\n\n+-----------+-----------------------------------------------------------------------+\n| Flag      | Meaning                                                               |\n+===========+=======================================================================+\n| "\'#\'"     | The value conversion will use the "alternate form" (where defined     |\n|           | below).                                    
                           |\n+-----------+-----------------------------------------------------------------------+\n| "\'0\'"     | The conversion will be zero padded for numeric values.                |\n+-----------+-----------------------------------------------------------------------+\n| "\'-\'"     | The converted value is left adjusted (overrides the "\'0\'" conversion  |\n|           | if both are given).                                                   |\n+-----------+-----------------------------------------------------------------------+\n| "\' \'"     | (a space) A blank should be left before a positive number (or empty   |\n|           | string) produced by a signed conversion.                              |\n+-----------+-----------------------------------------------------------------------+\n| "\'+\'"     | A sign character ("\'+\'" or "\'-\'") will precede the conversion         |\n|           | (overrides a "space" flag).                                           |\n+-----------+-----------------------------------------------------------------------+\n\nA length modifier ("h", "l", or "L") may be present, but is ignored as\nit is not necessary for Python -- so e.g. "%ld" is identical to "%d".\n\nThe conversion types are:\n\n+--------------+-------------------------------------------------------+---------+\n| Conversion   | Meaning                                               | Notes   |\n+==============+=======================================================+=========+\n| "\'d\'"        | Signed integer decimal.                               |         |\n+--------------+-------------------------------------------------------+---------+\n| "\'i\'"        | Signed integer decimal.                               |         |\n+--------------+-------------------------------------------------------+---------+\n| "\'o\'"        | Signed octal value.                                   | (1)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'u\'"        | Obsolete type -- it is identical to "\'d\'".            | (7)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'x\'"        | Signed hexadecimal (lowercase).                       | (2)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'X\'"        | Signed hexadecimal (uppercase).                       | (2)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'e\'"        | Floating point exponential format (lowercase).        | (3)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'E\'"        | Floating point exponential format (uppercase).        | (3)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'f\'"        | Floating point decimal format.                        | (3)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'F\'"        | Floating point decimal format.                        | (3)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'g\'"        | Floating point format. Uses lowercase exponential     | (4)     |\n|              | format if exponent is less than -4 or not less than   |         |\n|              | precision, decimal format otherwise.                  
|         |\n+--------------+-------------------------------------------------------+---------+\n| "\'G\'"        | Floating point format. Uses uppercase exponential     | (4)     |\n|              | format if exponent is less than -4 or not less than   |         |\n|              | precision, decimal format otherwise.                  |         |\n+--------------+-------------------------------------------------------+---------+\n| "\'c\'"        | Single character (accepts integer or single character |         |\n|              | string).                                              |         |\n+--------------+-------------------------------------------------------+---------+\n| "\'r\'"        | String (converts any Python object using repr()).     | (5)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'s\'"        | String (converts any Python object using "str()").    | (6)     |\n+--------------+-------------------------------------------------------+---------+\n| "\'%\'"        | No argument is converted, results in a "\'%\'"          |         |\n|              | character in the result.                              |         |\n+--------------+-------------------------------------------------------+---------+\n\nNotes:\n\n1. The alternate form causes a leading zero ("\'0\'") to be inserted\n   between left-hand padding and the formatting of the number if the\n   leading character of the result is not already a zero.\n\n2. The alternate form causes a leading "\'0x\'" or "\'0X\'" (depending\n   on whether the "\'x\'" or "\'X\'" format was used) to be inserted\n   between left-hand padding and the formatting of the number if the\n   leading character of the result is not already a zero.\n\n3. The alternate form causes the result to always contain a decimal\n   point, even if no digits follow it.\n\n   The precision determines the number of digits after the decimal\n   point and defaults to 6.\n\n4. The alternate form causes the result to always contain a decimal\n   point, and trailing zeroes are not removed as they would otherwise\n   be.\n\n   The precision determines the number of significant digits before\n   and after the decimal point and defaults to 6.\n\n5. The "%r" conversion was added in Python 2.0.\n\n   The precision determines the maximal number of characters used.\n\n6. If the object or format provided is a "unicode" string, the\n   resulting string will also be "unicode".\n\n   The precision determines the maximal number of characters used.\n\n7. See **PEP 237**.\n\nSince Python strings have an explicit length, "%s" conversions do not\nassume that "\'\\0\'" is the end of the string.\n\nChanged in version 2.7: "%f" conversions for numbers whose absolute\nvalue is over 1e50 are no longer replaced by "%g" conversions.\n\nAdditional string operations are defined in standard modules "string"\nand "re".\n\n\nXRange Type\n===========\n\nThe "xrange" type is an immutable sequence which is commonly used for\nlooping.  The advantage of the "xrange" type is that an "xrange"\nobject will always take the same amount of memory, no matter the size\nof the range it represents.  There are no consistent performance\nadvantages.\n\nXRange objects have very little behavior: they only support indexing,\niteration, and the "len()" function.\n\n\nMutable Sequence Types\n======================\n\nList and "bytearray" objects support additional operations that allow\nin-place modification of the object. 
Other mutable sequence types\n(when added to the language) should also support these operations.\nStrings and tuples are immutable sequence types: such objects cannot\nbe modified once created. The following operations are defined on\nmutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation                      | Result                           | Notes                 |\n+================================+==================================+=======================+\n| "s[i] = x"                     | item *i* of *s* is replaced by   |                       |\n|                                | *x*                              |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t"                   | slice of *s* from *i* to *j* is  |                       |\n|                                | replaced by the contents of the  |                       |\n|                                | iterable *t*                     |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]"                   | same as "s[i:j] = []"            |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t"                 | the elements of "s[i:j:k]" are   | (1)                   |\n|                                | replaced by those of *t*         |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]"                 | removes the elements of          |                       |\n|                                | "s[i:j:k]" from the list         |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)"                  | same as "s[len(s):len(s)] = [x]" | (2)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(x)" or "s += t"      | for the most part the same as    | (3)                   |\n|                                | "s[len(s):len(s)] = x"           |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s *= n"                       | updates *s* with its contents    | (11)                  |\n|                                | repeated *n* times               |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.count(x)"                   | return number of *i*\'s for which |                       |\n|                                | "s[i] == x"                      |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.index(x[, i[, j]])"         | return smallest *k* such that    | (4)                   |\n|                                | "s[k] == x" and "i <= k < j"     |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)"               | same as "s[i:i] = [x]"           | (5)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])"             
      | same as "x = s[i]; del s[i];     | (6)                   |\n|                                | return x"                        |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)"                  | same as "del s[s.index(x)]"      | (4)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()"                  | reverses the items of *s* in     | (7)                   |\n|                                | place                            |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.sort([cmp[, key[,           | sort the items of *s* in place   | (7)(8)(9)(10)         |\n| reverse]]])"                   |                                  |                       |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is  replacing.\n\n2. The C implementation of Python has historically accepted\n   multiple parameters and implicitly joined them into a tuple; this\n   no longer works in Python 2.0.  Use of this misfeature has been\n   deprecated since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises "ValueError" when *x* is not found in *s*. When a\n   negative index is passed as the second or third parameter to the\n   "index()" method, the list length is added, as for slice indices.\n   If it is still negative, it is truncated to zero, as for slice\n   indices.\n\n   Changed in version 2.3: Previously, "index()" didn\'t have arguments\n   for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n   "insert()" method, the list length is added, as for slice indices.\n   If it is still negative, it is truncated to zero, as for slice\n   indices.\n\n   Changed in version 2.3: Previously, all negative indices were\n   truncated to zero.\n\n6. The "pop()" method\'s optional argument *i* defaults to "-1", so\n   that by default the last item is removed and returned.\n\n7. The "sort()" and "reverse()" methods modify the list in place\n   for economy of space when sorting or reversing a large list.  To\n   remind you that they operate by side effect, they don\'t return the\n   sorted or reversed list.\n\n8. The "sort()" method takes optional arguments for controlling the\n   comparisons.\n\n   *cmp* specifies a custom comparison function of two arguments (list\n   items) which should return a negative, zero or positive number\n   depending on whether the first argument is considered smaller than,\n   equal to, or larger than the second argument: "cmp=lambda x,y:\n   cmp(x.lower(), y.lower())".  The default value is "None".\n\n   *key* specifies a function of one argument that is used to extract\n   a comparison key from each list element: "key=str.lower".  The\n   default value is "None".\n\n   *reverse* is a boolean value.  If set to "True", then the list\n   elements are sorted as if each comparison were reversed.\n\n   In general, the *key* and *reverse* conversion processes are much\n   faster than specifying an equivalent *cmp* function.  This is\n   because *cmp* is called multiple times for each list element while\n   *key* and *reverse* touch each element only once.  
Use\n   "functools.cmp_to_key()" to convert an old-style *cmp* function to\n   a *key* function.\n\n   Changed in version 2.3: Support for "None" as an equivalent to\n   omitting *cmp* was added.\n\n   Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the "sort()" method is guaranteed to\n   be stable.  A sort is stable if it guarantees not to change the\n   relative order of elements that compare equal --- this is helpful\n   for sorting in multiple passes (for example, sort by department,\n   then by salary grade).\n\n10. **CPython implementation detail:** While a list is being\n    sorted, the effect of attempting to mutate, or even inspect, the\n    list is undefined.  The C implementation of Python 2.3 and newer\n    makes the list appear empty for the duration, and raises\n    "ValueError" if it can detect that the list has been mutated\n    during a sort.\n\n11. The value *n* is an integer, or an object implementing\n    "__index__()".  Zero and negative values of *n* clear the\n    sequence.  Items in the sequence are not copied; they are\n    referenced multiple times, as explained for "s * n" under Sequence\n    Types --- str, unicode, list, tuple, bytearray, buffer, xrange.\n',
+ 'typesseq-mutable': u'\nMutable Sequence Types\n**********************\n\nList and "bytearray" objects support additional operations that allow\nin-place modification of the object. Other mutable sequence types\n(when added to the language) should also support these operations.\nStrings and tuples are immutable sequence types: such objects cannot\nbe modified once created. The following operations are defined on\nmutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation                      | Result                           | Notes                 |\n+================================+==================================+=======================+\n| "s[i] = x"                     | item *i* of *s* is replaced by   |                       |\n|                                | *x*                              |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t"                   | slice of *s* from *i* to *j* is  |                       |\n|                                | replaced by the contents of the  |                       |\n|                                | iterable *t*                     |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]"                   | same as "s[i:j] = []"            |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t"                 | the elements of "s[i:j:k]" are   | (1)                   |\n|                                | replaced by those of *t*         |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]"                 | removes the elements of          |                       |\n|                                | "s[i:j:k]" from the list         |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)"                  | same as "s[len(s):len(s)] = [x]" | (2)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(x)" or "s += t"      | for the most part the same as    | (3)                   |\n|                                | "s[len(s):len(s)] = x"           |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s *= n"                       | updates *s* with its contents    | (11)                  |\n|                                | repeated *n* times               |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.count(x)"                   | return number of *i*\'s for which |                       |\n|                                | "s[i] == x"                      |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.index(x[, i[, j]])"         | return smallest *k* such that    | (4)                   |\n|                                | "s[k] == x" and "i <= k < j"     |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)"               
| same as "s[i:i] = [x]"           | (5)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])"                   | same as "x = s[i]; del s[i];     | (6)                   |\n|                                | return x"                        |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)"                  | same as "del s[s.index(x)]"      | (4)                   |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()"                  | reverses the items of *s* in     | (7)                   |\n|                                | place                            |                       |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.sort([cmp[, key[,           | sort the items of *s* in place   | (7)(8)(9)(10)         |\n| reverse]]])"                   |                                  |                       |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is  replacing.\n\n2. The C implementation of Python has historically accepted\n   multiple parameters and implicitly joined them into a tuple; this\n   no longer works in Python 2.0.  Use of this misfeature has been\n   deprecated since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises "ValueError" when *x* is not found in *s*. When a\n   negative index is passed as the second or third parameter to the\n   "index()" method, the list length is added, as for slice indices.\n   If it is still negative, it is truncated to zero, as for slice\n   indices.\n\n   Changed in version 2.3: Previously, "index()" didn\'t have arguments\n   for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n   "insert()" method, the list length is added, as for slice indices.\n   If it is still negative, it is truncated to zero, as for slice\n   indices.\n\n   Changed in version 2.3: Previously, all negative indices were\n   truncated to zero.\n\n6. The "pop()" method\'s optional argument *i* defaults to "-1", so\n   that by default the last item is removed and returned.\n\n7. The "sort()" and "reverse()" methods modify the list in place\n   for economy of space when sorting or reversing a large list.  To\n   remind you that they operate by side effect, they don\'t return the\n   sorted or reversed list.\n\n8. The "sort()" method takes optional arguments for controlling the\n   comparisons.\n\n   *cmp* specifies a custom comparison function of two arguments (list\n   items) which should return a negative, zero or positive number\n   depending on whether the first argument is considered smaller than,\n   equal to, or larger than the second argument: "cmp=lambda x,y:\n   cmp(x.lower(), y.lower())".  The default value is "None".\n\n   *key* specifies a function of one argument that is used to extract\n   a comparison key from each list element: "key=str.lower".  The\n   default value is "None".\n\n   *reverse* is a boolean value.  If set to "True", then the list\n   elements are sorted as if each comparison were reversed.\n\n   In general, the *key* and *reverse* conversion processes are much\n   faster than specifying an equivalent *cmp* function.  
This is\n   because *cmp* is called multiple times for each list element while\n   *key* and *reverse* touch each element only once.  Use\n   "functools.cmp_to_key()" to convert an old-style *cmp* function to\n   a *key* function.\n\n   Changed in version 2.3: Support for "None" as an equivalent to\n   omitting *cmp* was added.\n\n   Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the "sort()" method is guaranteed to\n   be stable.  A sort is stable if it guarantees not to change the\n   relative order of elements that compare equal --- this is helpful\n   for sorting in multiple passes (for example, sort by department,\n   then by salary grade).\n\n10. **CPython implementation detail:** While a list is being\n    sorted, the effect of attempting to mutate, or even inspect, the\n    list is undefined.  The C implementation of Python 2.3 and newer\n    makes the list appear empty for the duration, and raises\n    "ValueError" if it can detect that the list has been mutated\n    during a sort.\n\n11. The value *n* is an integer, or an object implementing\n    "__index__()".  Zero and negative values of *n* clear the\n    sequence.  Items in the sequence are not copied; they are\n    referenced multiple times, as explained for "s * n" under Sequence\n    Types --- str, unicode, list, tuple, bytearray, buffer, xrange.\n',
  'unary': u'\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n   u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary "-" (minus) operator yields the negation of its numeric\nargument.\n\nThe unary "+" (plus) operator yields its numeric argument unchanged.\n\nThe unary "~" (invert) operator yields the bitwise inversion of its\nplain or long integer argument.  The bitwise inversion of "x" is\ndefined as "-(x+1)".  It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n"TypeError" exception is raised.\n',
  'while': u'\nThe "while" statement\n*********************\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n   while_stmt ::= "while" expression ":" suite\n                  ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite.  A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n',
- 'with': u'\nThe "with" statement\n********************\n\nNew in version 2.5.\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section With Statement\nContext Managers). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n   with_stmt ::= "with" with_item ("," with_item)* ":" suite\n   with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n   is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n   value from "__enter__()" is assigned to it.\n\n   Note: The "with" statement guarantees that if the "__enter__()"\n     method returns without an error, then "__exit__()" will always be\n     called. Thus, if an error occurs during the assignment to the\n     target list, it will be treated the same as an error occurring\n     within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n   exception caused the suite to be exited, its type, value, and\n   traceback are passed as arguments to "__exit__()". Otherwise, three\n   "None" arguments are supplied.\n\n   If the suite was exited due to an exception, and the return value\n   from the "__exit__()" method was false, the exception is reraised.\n   If the return value was true, the exception is suppressed, and\n   execution continues with the statement following the "with"\n   statement.\n\n   If the suite was exited for any reason other than an exception, the\n   return value from "__exit__()" is ignored, and execution proceeds\n   at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n   with A() as a, B() as b:\n       suite\n\nis equivalent to\n\n   with A() as a:\n       with B() as b:\n           suite\n\nNote: In Python 2.5, the "with" statement is only allowed when the\n  "with_statement" feature has been enabled.  It is always enabled in\n  Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n     The specification, background, and examples for the Python "with"\n     statement.\n',
- 'yield': u'\nThe "yield" statement\n*********************\n\n   yield_stmt ::= yield_expression\n\nThe "yield" statement is only used when defining a generator function,\nand is only used in the body of the generator function. Using a\n"yield" statement in a function definition is sufficient to cause that\ndefinition to create a generator function instead of a normal\nfunction.\n\nWhen a generator function is called, it returns an iterator known as a\ngenerator iterator, or more commonly, a generator.  The body of the\ngenerator function is executed by calling the generator\'s "next()"\nmethod repeatedly until it raises an exception.\n\nWhen a "yield" statement is executed, the state of the generator is\nfrozen and the value of "expression_list" is returned to "next()"\'s\ncaller.  By "frozen" we mean that all local state is retained,\nincluding the current bindings of local variables, the instruction\npointer, and the internal evaluation stack: enough information is\nsaved so that the next time "next()" is invoked, the function can\nproceed exactly as if the "yield" statement were just another external\ncall.\n\nAs of Python version 2.5, the "yield" statement is now allowed in the\n"try" clause of a "try" ...  "finally" construct.  If the generator is\nnot resumed before it is finalized (by reaching a zero reference count\nor by being garbage collected), the generator-iterator\'s "close()"\nmethod will be called, allowing any pending "finally" clauses to\nexecute.\n\nFor full details of "yield" semantics, refer to the Yield expressions\nsection.\n\nNote: In Python 2.2, the "yield" statement was only allowed when the\n  "generators" feature has been enabled.  This "__future__" import\n  statement was used to enable the feature:\n\n     from __future__ import generators\n\nSee also: **PEP 0255** - Simple Generators\n\n     The proposal for adding generators and the "yield" statement to\n     Python.\n\n  **PEP 0342** - Coroutines via Enhanced Generators\n     The proposal that, among other generator enhancements, proposed\n     allowing "yield" to appear inside a "try" ... "finally" block.\n'}
+ 'with': u'\nThe "with" statement\n********************\n\nNew in version 2.5.\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section With Statement\nContext Managers). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n   with_stmt ::= "with" with_item ("," with_item)* ":" suite\n   with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n   is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n   value from "__enter__()" is assigned to it.\n\n   Note: The "with" statement guarantees that if the "__enter__()"\n     method returns without an error, then "__exit__()" will always be\n     called. Thus, if an error occurs during the assignment to the\n     target list, it will be treated the same as an error occurring\n     within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n   exception caused the suite to be exited, its type, value, and\n   traceback are passed as arguments to "__exit__()". Otherwise, three\n   "None" arguments are supplied.\n\n   If the suite was exited due to an exception, and the return value\n   from the "__exit__()" method was false, the exception is reraised.\n   If the return value was true, the exception is suppressed, and\n   execution continues with the statement following the "with"\n   statement.\n\n   If the suite was exited for any reason other than an exception, the\n   return value from "__exit__()" is ignored, and execution proceeds\n   at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n   with A() as a, B() as b:\n       suite\n\nis equivalent to\n\n   with A() as a:\n       with B() as b:\n           suite\n\nNote: In Python 2.5, the "with" statement is only allowed when the\n  "with_statement" feature has been enabled.  It is always enabled in\n  Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n  **PEP 0343** - The "with" statement\n     The specification, background, and examples for the Python "with"\n     statement.\n',
+ 'yield': u'\nThe "yield" statement\n*********************\n\n   yield_stmt ::= yield_expression\n\nThe "yield" statement is only used when defining a generator function,\nand is only used in the body of the generator function. Using a\n"yield" statement in a function definition is sufficient to cause that\ndefinition to create a generator function instead of a normal\nfunction.\n\nWhen a generator function is called, it returns an iterator known as a\ngenerator iterator, or more commonly, a generator.  The body of the\ngenerator function is executed by calling the generator\'s "next()"\nmethod repeatedly until it raises an exception.\n\nWhen a "yield" statement is executed, the state of the generator is\nfrozen and the value of "expression_list" is returned to "next()"\'s\ncaller.  By "frozen" we mean that all local state is retained,\nincluding the current bindings of local variables, the instruction\npointer, and the internal evaluation stack: enough information is\nsaved so that the next time "next()" is invoked, the function can\nproceed exactly as if the "yield" statement were just another external\ncall.\n\nAs of Python version 2.5, the "yield" statement is now allowed in the\n"try" clause of a "try" ...  "finally" construct.  If the generator is\nnot resumed before it is finalized (by reaching a zero reference count\nor by being garbage collected), the generator-iterator\'s "close()"\nmethod will be called, allowing any pending "finally" clauses to\nexecute.\n\nFor full details of "yield" semantics, refer to the Yield expressions\nsection.\n\nNote: In Python 2.2, the "yield" statement was only allowed when the\n  "generators" feature has been enabled.  This "__future__" import\n  statement was used to enable the feature:\n\n     from __future__ import generators\n\nSee also:\n\n  **PEP 0255** - Simple Generators\n     The proposal for adding generators and the "yield" statement to\n     Python.\n\n  **PEP 0342** - Coroutines via Enhanced Generators\n     The proposal that, among other generator enhancements, proposed\n     allowing "yield" to appear inside a "try" ... "finally" block.\n'}
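
The regenerated topic text above is the reference material pydoc serves for things like the "%" formatting operator, the mutable sequence methods, and the "with"/"yield" statements. A small Python 2.7 sketch (illustrative only, not part of the patch) of two behaviours those topics describe, mapping-key "%" formatting and the nested-equivalent form of a multi-item "with":

    # Illustrative only; mirrors the pydoc topic text above (Python 2.7 syntax).
    import contextlib

    # Mapping-key formatting: "(name)" selects the value from the dict.
    print '%(language)s has %(number)03d quote types.' % \
          {'language': 'Python', 'number': 2}      # Python has 002 quote types.

    @contextlib.contextmanager
    def managed(name):
        print 'enter', name
        yield name
        print 'exit', name

    # "with A() as a, B() as b:" runs exactly like the nested form described above.
    with managed('A') as a, managed('B') as b:
        print a, b                                 # A B
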
diff --git a/lib/python2.7/rlcompleter.py b/lib/python2.7/rlcompleter.py
index 6e4bd12..7f61c67 100644
--- a/lib/python2.7/rlcompleter.py
+++ b/lib/python2.7/rlcompleter.py
@@ -102,13 +102,16 @@
         """
         import keyword
         matches = []
+        seen = {"__builtins__"}
         n = len(text)
         for word in keyword.kwlist:
             if word[:n] == text:
+                seen.add(word)
                 matches.append(word)
-        for nspace in [__builtin__.__dict__, self.namespace]:
+        for nspace in [self.namespace, __builtin__.__dict__]:
             for word, val in nspace.items():
-                if word[:n] == text and word != "__builtins__":
+                if word[:n] == text and word not in seen:
+                    seen.add(word)
                     matches.append(self._callable_postfix(val, word))
         return matches
 
@@ -135,20 +138,23 @@
             return []
 
         # get the content of the object, except __builtins__
-        words = dir(thisobject)
-        if "__builtins__" in words:
-            words.remove("__builtins__")
+        words = set(dir(thisobject))
+        words.discard("__builtins__")
 
         if hasattr(thisobject, '__class__'):
-            words.append('__class__')
-            words.extend(get_class_members(thisobject.__class__))
+            words.add('__class__')
+            words.update(get_class_members(thisobject.__class__))
         matches = []
         n = len(attr)
         for word in words:
-            if word[:n] == attr and hasattr(thisobject, word):
-                val = getattr(thisobject, word)
+            if word[:n] == attr:
+                try:
+                    val = getattr(thisobject, word)
+                except Exception:
+                    continue  # Exclude properties that are not set
                 word = self._callable_postfix(val, "%s.%s" % (expr, word))
                 matches.append(word)
+        matches.sort()
         return matches
 
 def get_class_members(klass):
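
The rlcompleter changes above make completion results cleaner: keyword matches now shadow identical namespace/builtin entries via the `seen` set, the interactive namespace is consulted before `__builtin__`, attributes whose getattr() raises are skipped instead of aborting completion, and attribute matches come back sorted. A rough way to exercise the updated completer directly (illustrative only; assumes a stock Python 2.7 interpreter):

    # Illustrative only: drive rlcompleter.Completer without readline.
    import rlcompleter

    completer = rlcompleter.Completer({'data': 'hello',
                                       'print_twice': lambda s: s * 2})

    # Keywords first, then namespace/builtin names, with duplicates and the
    # "__builtins__" key filtered out; callables get a trailing "(".
    print completer.global_matches('pri')    # e.g. ['print', 'print_twice(']

    # Attribute completion is sorted and tolerant of attributes that raise.
    print completer.attr_matches('data.st')  # e.g. ['data.startswith(', 'data.strip(']
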
diff --git a/lib/python2.7/runpy.py b/lib/python2.7/runpy.py
index c4d7cc2..ad4d077 100644
--- a/lib/python2.7/runpy.py
+++ b/lib/python2.7/runpy.py
@@ -97,27 +97,35 @@
     return None
 
 # Helper to get the loader, code and filename for a module
-def _get_module_details(mod_name):
-    loader = get_loader(mod_name)
-    if loader is None:
-        raise ImportError("No module named %s" % mod_name)
-    if loader.is_package(mod_name):
+def _get_module_details(mod_name, error=ImportError):
+    try:
+        loader = get_loader(mod_name)
+        if loader is None:
+            raise error("No module named %s" % mod_name)
+        ispkg = loader.is_package(mod_name)
+    except ImportError as e:
+        raise error(format(e))
+    if ispkg:
         if mod_name == "__main__" or mod_name.endswith(".__main__"):
-            raise ImportError("Cannot use package as __main__ module")
+            raise error("Cannot use package as __main__ module")
+        __import__(mod_name)  # Do not catch exceptions initializing package
         try:
             pkg_main_name = mod_name + ".__main__"
             return _get_module_details(pkg_main_name)
         except ImportError, e:
-            raise ImportError(("%s; %r is a package and cannot " +
+            raise error(("%s; %r is a package and cannot " +
                                "be directly executed") %(e, mod_name))
-    code = loader.get_code(mod_name)
+    try:
+        code = loader.get_code(mod_name)
+    except ImportError as e:
+        raise error(format(e))
     if code is None:
-        raise ImportError("No code object available for %s" % mod_name)
+        raise error("No code object available for %s" % mod_name)
     filename = _get_filename(loader, mod_name)
     return mod_name, loader, code, filename
 
 
-def _get_main_module_details():
+def _get_main_module_details(error=ImportError):
     # Helper that gives a nicer error message when attempting to
     # execute a zipfile or directory by invoking __main__.py
     main_name = "__main__"
@@ -125,10 +133,13 @@
         return _get_module_details(main_name)
     except ImportError as exc:
         if main_name in str(exc):
-            raise ImportError("can't find %r module in %r" %
+            raise error("can't find %r module in %r" %
                               (main_name, sys.path[0]))
         raise
 
+class _Error(Exception):
+    """Error that _run_module_as_main() should report without a traceback"""
+
 # This function is the actual implementation of the -m switch and direct
 # execution of zipfiles and directories and is deliberately kept private.
 # This avoids a repeat of the situation where run_module() no longer met the
@@ -148,11 +159,12 @@
     """
     try:
         if alter_argv or mod_name != "__main__": # i.e. -m switch
-            mod_name, loader, code, fname = _get_module_details(mod_name)
+            mod_name, loader, code, fname = _get_module_details(
+                mod_name, _Error)
         else:          # i.e. directory or zipfile execution
-            mod_name, loader, code, fname = _get_main_module_details()
-    except ImportError as exc:
-        msg = "%s: %s" % (sys.executable, str(exc))
+            mod_name, loader, code, fname = _get_main_module_details(_Error)
+    except _Error as exc:
+        msg = "%s: %s" % (sys.executable, exc)
         sys.exit(msg)
     pkg_name = mod_name.rpartition('.')[0]
     main_globals = sys.modules["__main__"].__dict__
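
The runpy changes route failures in locating or loading a -m target through the private _Error type, so _run_module_as_main() can exit with a one-line message instead of a traceback, while callers of the public API still get a plain ImportError and exceptions raised by the target module's own code still propagate with a full traceback. A quick illustration (sketch only, not part of the patch):

    # Illustrative only: public API behaviour is unchanged.
    import runpy

    try:
        runpy.run_module('no_such_module')   # library callers still get ImportError
    except ImportError as exc:
        print 'caller sees:', exc

    # At the command line, "python -m no_such_module" now prints something like
    #   <path-to-python2.7>: No module named no_such_module
    # and exits, because _run_module_as_main() catches _Error and calls sys.exit().
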
diff --git a/lib/python2.7/shutil.py b/lib/python2.7/shutil.py
index e78a575..26dcfde 100644
--- a/lib/python2.7/shutil.py
+++ b/lib/python2.7/shutil.py
@@ -449,7 +449,16 @@
         if not dry_run:
             with zipfile.ZipFile(zip_filename, "w",
                                  compression=zipfile.ZIP_DEFLATED) as zf:
+                path = os.path.normpath(base_dir)
+                zf.write(path, path)
+                if logger is not None:
+                    logger.info("adding '%s'", path)
                 for dirpath, dirnames, filenames in os.walk(base_dir):
+                    for name in sorted(dirnames):
+                        path = os.path.normpath(os.path.join(dirpath, name))
+                        zf.write(path, path)
+                        if logger is not None:
+                            logger.info("adding '%s'", path)
                     for name in filenames:
                         path = os.path.normpath(os.path.join(dirpath, name))
                         if os.path.isfile(path):
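
With the extra zf.write() calls above, _make_zipfile() records an explicit entry for base_dir and for every (sorted) subdirectory, so empty directories survive archiving with the zip format. A small sketch using a hypothetical scratch tree (illustrative only):

    # Illustrative only: empty directories now appear in the archive.
    import os, shutil, tempfile, zipfile

    root = tempfile.mkdtemp()
    os.makedirs(os.path.join(root, 'pkg', 'empty_subdir'))       # no files inside
    open(os.path.join(root, 'pkg', 'module.py'), 'w').close()

    archive = shutil.make_archive(os.path.join(root, 'pkg-1.0'), 'zip',
                                  root_dir=root, base_dir='pkg')
    with zipfile.ZipFile(archive) as zf:
        print zf.namelist()   # now also lists 'pkg/' and 'pkg/empty_subdir/'
    shutil.rmtree(root)
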
diff --git a/lib/python2.7/sqlite3/test/factory.py b/lib/python2.7/sqlite3/test/factory.py
index 0813a13..f4b8428 100644
--- a/lib/python2.7/sqlite3/test/factory.py
+++ b/lib/python2.7/sqlite3/test/factory.py
@@ -170,6 +170,14 @@
         self.assertEqual(list(reversed(row)), list(reversed(as_tuple)))
         self.assertIsInstance(row, Sequence)
 
+    def CheckFakeCursorClass(self):
+        # Issue #24257: Incorrect use of PyObject_IsInstance() caused
+        # segmentation fault.
+        class FakeCursor(str):
+            __class__ = sqlite.Cursor
+        cur = self.con.cursor(factory=FakeCursor)
+        self.assertRaises(TypeError, sqlite.Row, cur, ())
+
     def tearDown(self):
         self.con.close()
 
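
The new CheckFakeCursorClass test covers the Issue #24257 fix: sqlite3.Row now rejects an object that merely spoofs __class__ instead of dereferencing it and crashing. Custom cursors remain supported the normal way, by actually subclassing the cursor type (illustrative sketch using the stdlib sqlite3 module, not part of the patch):

    # Illustrative only: a real Cursor subclass is the supported factory.
    import sqlite3

    class LoggingCursor(sqlite3.Cursor):
        def execute(self, sql, *args):
            print 'SQL:', sql
            return sqlite3.Cursor.execute(self, sql, *args)

    con = sqlite3.connect(':memory:')
    con.row_factory = sqlite3.Row                 # rows gain name-based access
    cur = con.cursor(factory=LoggingCursor)
    cur.execute('select 1 as answer')
    print cur.fetchone()['answer']                # 1
    con.close()
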
diff --git a/lib/python2.7/sre_parse.py b/lib/python2.7/sre_parse.py
index c29cc16..75f488b 100644
--- a/lib/python2.7/sre_parse.py
+++ b/lib/python2.7/sre_parse.py
@@ -721,14 +721,14 @@
     elif tail:
         raise error, "bogus characters at end of regular expression"
 
-    if flags & SRE_FLAG_DEBUG:
-        p.dump()
-
     if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
         # the VERBOSE flag was switched on inside the pattern.  to be
         # on the safe side, we'll parse the whole thing again...
         return parse(str, p.pattern.flags)
 
+    if flags & SRE_FLAG_DEBUG:
+        p.dump()
+
     return p
 
 def parse_template(source, pattern):
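
Deferring p.dump() until after the VERBOSE re-parse means a pattern compiled with the DEBUG flag whose verbose mode is switched on inside the pattern is dumped only for the parse that is actually kept, rather than for the discarded first pass as well. A quick way to see the dump (illustrative only):

    # Illustrative only: re.DEBUG prints the parsed structure at compile time.
    import re

    # "(?x)" enables VERBOSE inside the pattern, forcing sre_parse to restart;
    # the debug dump is now produced only for the final, definitive parse.
    re.compile(r'(?x) \d+   # one or more digits', re.DEBUG)
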
diff --git a/lib/python2.7/subprocess.py b/lib/python2.7/subprocess.py
index f9e9104..78189f4 100644
--- a/lib/python2.7/subprocess.py
+++ b/lib/python2.7/subprocess.py
@@ -498,7 +498,6 @@
         'ignore_environment': 'E',
         'verbose': 'v',
         'bytes_warning': 'b',
-        'hash_randomization': 'R',
         'py3k_warning': '3',
     }
     args = []
@@ -506,6 +505,8 @@
         v = getattr(sys.flags, flag)
         if v > 0:
             args.append('-' + opt * v)
+    if getattr(sys.flags, 'hash_randomization') != 0:
+        args.append('-R')
     for opt in sys.warnoptions:
         args.append('-W' + opt)
     return args
@@ -1312,8 +1313,12 @@
                     os.close(errpipe_write)
 
                 # Wait for exec to fail or succeed; possibly raising exception
-                # Exception limited to 1M
                 data = _eintr_retry_call(os.read, errpipe_read, 1048576)
+                pickle_bits = []
+                while data:
+                    pickle_bits.append(data)
+                    data = _eintr_retry_call(os.read, errpipe_read, 1048576)
+                data = "".join(pickle_bits)
             finally:
                 if p2cread is not None and p2cwrite is not None:
                     _close_in_parent(p2cread)
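
Two independent fixes in subprocess here: -R is appended to reconstructed interpreter arguments only when hash randomization is actually enabled, and the exception pickle sent back over errpipe_read on exec failure is now read in a loop, so payloads larger than a single os.read() chunk are no longer truncated. The loop is the standard read-until-EOF pattern (sketch with an ordinary pipe standing in for the error pipe):

    # Illustrative only: same read-until-EOF pattern as the code above.
    import os

    def read_all(fd, chunk_size=1048576):
        chunks = []
        data = os.read(fd, chunk_size)
        while data:                       # '' means the writer closed the pipe
            chunks.append(data)
            data = os.read(fd, chunk_size)
        return ''.join(chunks)

    r, w = os.pipe()
    os.write(w, 'hello ')
    os.write(w, 'world')
    os.close(w)                           # EOF for the reader
    print read_all(r)                     # hello world
    os.close(r)
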
diff --git a/lib/python2.7/tarfile.py b/lib/python2.7/tarfile.py
index 082f361..be68800 100644
--- a/lib/python2.7/tarfile.py
+++ b/lib/python2.7/tarfile.py
@@ -186,7 +186,7 @@
     # itn() below.
     if s[0] != chr(0200):
         try:
-            n = int(nts(s) or "0", 8)
+            n = int(nts(s).strip() or "0", 8)
         except ValueError:
             raise InvalidHeaderError("invalid header")
     else:
@@ -744,12 +744,18 @@
         else:
             return self.readsparse(size)
 
+    def __read(self, size):
+        buf = self.fileobj.read(size)
+        if len(buf) != size:
+            raise ReadError("unexpected end of data")
+        return buf
+
     def readnormal(self, size):
         """Read operation for regular files.
         """
         self.fileobj.seek(self.offset + self.position)
         self.position += size
-        return self.fileobj.read(size)
+        return self.__read(size)
 
     def readsparse(self, size):
         """Read operation for sparse files.
@@ -777,7 +783,7 @@
             realpos = section.realpos + self.position - section.offset
             self.fileobj.seek(self.offset + realpos)
             self.position += size
-            return self.fileobj.read(size)
+            return self.__read(size)
         else:
             self.position += size
             return NUL * size
@@ -1838,11 +1844,12 @@
         return [tarinfo.name for tarinfo in self.getmembers()]
 
     def gettarinfo(self, name=None, arcname=None, fileobj=None):
-        """Create a TarInfo object for either the file `name' or the file
-           object `fileobj' (using os.fstat on its file descriptor). You can
-           modify some of the TarInfo's attributes before you add it using
-           addfile(). If given, `arcname' specifies an alternative name for the
-           file in the archive.
+        """Create a TarInfo object from the result of os.stat or equivalent
+           on an existing file. The file is either named by `name', or
+           specified as a file object `fileobj' with a file descriptor. If
+           given, `arcname' specifies an alternative name for the file in the
+           archive, otherwise, the name is taken from the 'name' attribute of
+           'fileobj', or the 'name' argument.
         """
         self._check("aw")
 
@@ -1863,7 +1870,7 @@
         # Now, fill the TarInfo object with
         # information specific for the file.
         tarinfo = self.tarinfo()
-        tarinfo.tarfile = self
+        tarinfo.tarfile = self  # Not needed
 
         # Use os.stat or os.lstat, depending on platform
         # and if symlinks shall be resolved.
@@ -2028,7 +2035,7 @@
     def addfile(self, tarinfo, fileobj=None):
         """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
            given, tarinfo.size bytes are read from it and added to the archive.
-           You can create TarInfo objects using gettarinfo().
+           You can create TarInfo objects directly, or by using gettarinfo().
            On Windows platforms, `fileobj' should always be opened with mode
            'rb' to avoid irritation about the file size.
         """
@@ -2336,8 +2343,13 @@
             self.firstmember = None
             return m
 
+        # Advance the file pointer.
+        if self.offset != self.fileobj.tell():
+            self.fileobj.seek(self.offset - 1)
+            if not self.fileobj.read(1):
+                raise ReadError("unexpected end of data")
+
         # Read the next block.
-        self.fileobj.seek(self.offset)
         tarinfo = None
         while True:
             try:
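
The __read() helper and the explicit file-pointer advance in next() turn short reads into ReadError, so a tar archive that ends in the middle of a member is reported instead of yielding silently truncated data. A rough sketch of the behaviour (illustrative only; builds a tiny in-memory archive and cuts it short):

    # Illustrative only: truncated archives now raise tarfile.ReadError.
    import io, tarfile

    buf = io.BytesIO()
    tf = tarfile.open(fileobj=buf, mode='w')
    payload = b'x' * 4096
    info = tarfile.TarInfo('payload.bin')
    info.size = len(payload)
    tf.addfile(info, io.BytesIO(payload))
    tf.close()

    truncated = io.BytesIO(buf.getvalue()[:1024])   # header plus partial data
    tf = tarfile.open(fileobj=truncated, mode='r')
    try:
        tf.extractfile('payload.bin').read()
    except tarfile.ReadError as exc:
        print 'ReadError:', exc                     # e.g. "unexpected end of data"
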
diff --git a/lib/python2.7/tempfile.py b/lib/python2.7/tempfile.py
index fbda8eb..7e3b25a 100644
--- a/lib/python2.7/tempfile.py
+++ b/lib/python2.7/tempfile.py
@@ -205,9 +205,14 @@
                     _os.unlink(filename)
                 return dir
             except (OSError, IOError) as e:
-                if e.args[0] != _errno.EEXIST:
-                    break # no point trying more names in this directory
-                pass
+                if e.args[0] == _errno.EEXIST:
+                    continue
+                if (_os.name == 'nt' and e.args[0] == _errno.EACCES and
+                    _os.path.isdir(dir) and _os.access(dir, _os.W_OK)):
+                    # On windows, when a directory with the chosen name already
+                    # exists, EACCES error code is returned instead of EEXIST.
+                    continue
+                break # no point trying more names in this directory
     raise IOError, (_errno.ENOENT,
                     ("No usable temporary directory found in %s" % dirlist))
 
@@ -242,7 +247,8 @@
         except OSError, e:
             if e.errno == _errno.EEXIST:
                 continue # try again
-            if _os.name == 'nt' and e.errno == _errno.EACCES:
+            if (_os.name == 'nt' and e.errno == _errno.EACCES and
+                _os.path.isdir(dir) and _os.access(dir, _os.W_OK)):
                 # On windows, when a directory with the chosen name already
                 # exists, EACCES error code is returned instead of EEXIST.
                 continue
@@ -335,6 +341,11 @@
         except OSError, e:
             if e.errno == _errno.EEXIST:
                 continue # try again
+            if (_os.name == 'nt' and e.errno == _errno.EACCES and
+                _os.path.isdir(dir) and _os.access(dir, _os.W_OK)):
+                # On windows, when a directory with the chosen name already
+                # exists, EACCES error code is returned instead of EEXIST.
+                continue
             raise
 
     raise IOError, (_errno.EEXIST, "No usable temporary directory name found")
@@ -444,8 +455,8 @@
     The file is created as mkstemp() would do it.
 
     Returns an object with a file-like interface; the name of the file
-    is accessible as file.name.  The file will be automatically deleted
-    when it is closed unless the 'delete' argument is set to False.
+    is accessible as its 'name' attribute.  The file will be automatically
+    deleted when it is closed unless the 'delete' argument is set to False.
     """
 
     if dir is None:
@@ -465,7 +476,8 @@
     try:
         file = _os.fdopen(fd, mode, bufsize)
         return _TemporaryFileWrapper(file, name, delete)
-    except:
+    except BaseException:
+        _os.unlink(name)
         _os.close(fd)
         raise
 
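
Alongside the Windows EACCES workaround (now applied consistently in the gettempdir(), mkstemp() and mkdtemp() retry loops), NamedTemporaryFile unlinks the freshly created file if wrapping the descriptor fails, so an error in fdopen() no longer leaves an orphaned temporary file behind. Normal usage is unchanged (illustrative only):

    # Illustrative only: typical NamedTemporaryFile usage, unaffected by the fix.
    import os, tempfile

    with tempfile.NamedTemporaryFile(suffix='.log', delete=False) as tmp:
        tmp.write('scratch data\n')
        path = tmp.name                  # the file's name attribute

    print os.path.exists(path)           # True; delete=False keeps the file around
    os.unlink(path)
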
diff --git a/lib/python2.7/test/capath/0e4015b9.0 b/lib/python2.7/test/capath/0e4015b9.0
new file mode 100644
index 0000000..b6d259b
--- /dev/null
+++ b/lib/python2.7/test/capath/0e4015b9.0
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE-----
+MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV
+BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u
+IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv
+bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG
+A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo
+b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0
+aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ
+Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm
+Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv
+EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl
+bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN
+AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h
+TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515
+C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM=
+-----END CERTIFICATE-----
diff --git a/lib/python2.7/test/capath/ce7b8643.0 b/lib/python2.7/test/capath/ce7b8643.0
new file mode 100644
index 0000000..b6d259b
--- /dev/null
+++ b/lib/python2.7/test/capath/ce7b8643.0
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE-----
+MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV
+BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u
+IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv
+bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG
+A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo
+b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0
+aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ
+Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm
+Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv
+EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl
+bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN
+AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h
+TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515
+C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM=
+-----END CERTIFICATE-----
diff --git a/lib/python2.7/test/https_svn_python_org_root.pem b/lib/python2.7/test/https_svn_python_org_root.pem
deleted file mode 100644
index e7dfc82..0000000
--- a/lib/python2.7/test/https_svn_python_org_root.pem
+++ /dev/null
@@ -1,41 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIHPTCCBSWgAwIBAgIBADANBgkqhkiG9w0BAQQFADB5MRAwDgYDVQQKEwdSb290
-IENBMR4wHAYDVQQLExVodHRwOi8vd3d3LmNhY2VydC5vcmcxIjAgBgNVBAMTGUNB
-IENlcnQgU2lnbmluZyBBdXRob3JpdHkxITAfBgkqhkiG9w0BCQEWEnN1cHBvcnRA
-Y2FjZXJ0Lm9yZzAeFw0wMzAzMzAxMjI5NDlaFw0zMzAzMjkxMjI5NDlaMHkxEDAO
-BgNVBAoTB1Jvb3QgQ0ExHjAcBgNVBAsTFWh0dHA6Ly93d3cuY2FjZXJ0Lm9yZzEi
-MCAGA1UEAxMZQ0EgQ2VydCBTaWduaW5nIEF1dGhvcml0eTEhMB8GCSqGSIb3DQEJ
-ARYSc3VwcG9ydEBjYWNlcnQub3JnMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
-CgKCAgEAziLA4kZ97DYoB1CW8qAzQIxL8TtmPzHlawI229Z89vGIj053NgVBlfkJ
-8BLPRoZzYLdufujAWGSuzbCtRRcMY/pnCujW0r8+55jE8Ez64AO7NV1sId6eINm6
-zWYyN3L69wj1x81YyY7nDl7qPv4coRQKFWyGhFtkZip6qUtTefWIonvuLwphK42y
-fk1WpRPs6tqSnqxEQR5YYGUFZvjARL3LlPdCfgv3ZWiYUQXw8wWRBB0bF4LsyFe7
-w2t6iPGwcswlWyCR7BYCEo8y6RcYSNDHBS4CMEK4JZwFaz+qOqfrU0j36NK2B5jc
-G8Y0f3/JHIJ6BVgrCFvzOKKrF11myZjXnhCLotLddJr3cQxyYN/Nb5gznZY0dj4k
-epKwDpUeb+agRThHqtdB7Uq3EvbXG4OKDy7YCbZZ16oE/9KTfWgu3YtLq1i6L43q
-laegw1SJpfvbi1EinbLDvhG+LJGGi5Z4rSDTii8aP8bQUWWHIbEZAWV/RRyH9XzQ
-QUxPKZgh/TMfdQwEUfoZd9vUFBzugcMd9Zi3aQaRIt0AUMyBMawSB3s42mhb5ivU
-fslfrejrckzzAeVLIL+aplfKkQABi6F1ITe1Yw1nPkZPcCBnzsXWWdsC4PDSy826
-YreQQejdIOQpvGQpQsgi3Hia/0PsmBsJUUtaWsJx8cTLc6nloQsCAwEAAaOCAc4w
-ggHKMB0GA1UdDgQWBBQWtTIb1Mfz4OaO873SsDrusjkY0TCBowYDVR0jBIGbMIGY
-gBQWtTIb1Mfz4OaO873SsDrusjkY0aF9pHsweTEQMA4GA1UEChMHUm9vdCBDQTEe
-MBwGA1UECxMVaHR0cDovL3d3dy5jYWNlcnQub3JnMSIwIAYDVQQDExlDQSBDZXJ0
-IFNpZ25pbmcgQXV0aG9yaXR5MSEwHwYJKoZIhvcNAQkBFhJzdXBwb3J0QGNhY2Vy
-dC5vcmeCAQAwDwYDVR0TAQH/BAUwAwEB/zAyBgNVHR8EKzApMCegJaAjhiFodHRw
-czovL3d3dy5jYWNlcnQub3JnL3Jldm9rZS5jcmwwMAYJYIZIAYb4QgEEBCMWIWh0
-dHBzOi8vd3d3LmNhY2VydC5vcmcvcmV2b2tlLmNybDA0BglghkgBhvhCAQgEJxYl
-aHR0cDovL3d3dy5jYWNlcnQub3JnL2luZGV4LnBocD9pZD0xMDBWBglghkgBhvhC
-AQ0ESRZHVG8gZ2V0IHlvdXIgb3duIGNlcnRpZmljYXRlIGZvciBGUkVFIGhlYWQg
-b3ZlciB0byBodHRwOi8vd3d3LmNhY2VydC5vcmcwDQYJKoZIhvcNAQEEBQADggIB
-ACjH7pyCArpcgBLKNQodgW+JapnM8mgPf6fhjViVPr3yBsOQWqy1YPaZQwGjiHCc
-nWKdpIevZ1gNMDY75q1I08t0AoZxPuIrA2jxNGJARjtT6ij0rPtmlVOKTV39O9lg
-18p5aTuxZZKmxoGCXJzN600BiqXfEVWqFcofN8CCmHBh22p8lqOOLlQ+TyGpkO/c
-gr/c6EWtTZBzCDyUZbAEmXZ/4rzCahWqlwQ3JNgelE5tDlG+1sSPypZt90Pf6DBl
-Jzt7u0NDY8RD97LsaMzhGY4i+5jhe1o+ATc7iwiwovOVThrLm82asduycPAtStvY
-sONvRUgzEv/+PDIqVPfE94rwiCPCR/5kenHA0R6mY7AHfqQv0wGP3J8rtsYIqQ+T
-SCX8Ev2fQtzzxD72V7DX3WnRBnc0CkvSyqD/HMaMyRa+xMwyN2hzXwj7UfdJUzYF
-CpUCTPJ5GhD22Dp1nPMd8aINcGeGG7MW9S/lpOt5hvk9C8JzC6WZrG/8Z7jlLwum
-GCSNe9FINSkYQKyTYOGWhlC0elnYjyELn8+CkcY7v2vcB5G5l1YjqrZslMZIBjzk
-zk6q5PYvCdxTby78dOs6Y5nCpqyJvKeyRKANihDjbPIky/qbn3BHLt4Ui9SyIAmW
-omTxJBzcoTWcFbLUvFUufQb1nA5V9FrWk9p2rSVzTMVD
------END CERTIFICATE-----
diff --git a/lib/python2.7/test/lock_tests.py b/lib/python2.7/test/lock_tests.py
index 2ff75c4..efc464f 100644
--- a/lib/python2.7/test/lock_tests.py
+++ b/lib/python2.7/test/lock_tests.py
@@ -305,6 +305,14 @@
         for r, dt in results2:
             self.assertTrue(r)
 
+    def test_reset_internal_locks(self):
+        evt = self.eventtype()
+        old_lock = evt._Event__cond._Condition__lock
+        evt._reset_internal_locks()
+        new_lock = evt._Event__cond._Condition__lock
+        self.assertIsNot(new_lock, old_lock)
+        self.assertIs(type(new_lock), type(old_lock))
+
 
 class ConditionTests(BaseTestCase):
     """
diff --git a/lib/python2.7/test/pickletester.py b/lib/python2.7/test/pickletester.py
index 1599893..855a9c4 100644
--- a/lib/python2.7/test/pickletester.py
+++ b/lib/python2.7/test/pickletester.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import unittest
 import pickle
 import cPickle
@@ -5,7 +6,9 @@
 import cStringIO
 import pickletools
 import copy_reg
+import sys
 
+from test import test_support as support
 from test.test_support import TestFailed, verbose, have_unicode, TESTFN
 try:
     from test.test_support import _2G, _1M, precisionbigmemtest
@@ -57,6 +60,21 @@
         return inner
     return decorator
 
+def no_tracing(func):
+    """Decorator to temporarily turn off tracing for the duration of a test."""
+    if not hasattr(sys, 'gettrace'):
+        return func
+    else:
+        def wrapper(*args, **kwargs):
+            original_trace = sys.gettrace()
+            try:
+                sys.settrace(None)
+                return func(*args, **kwargs)
+            finally:
+                sys.settrace(original_trace)
+        wrapper.__name__ = func.__name__
+        return wrapper
+
 
 # Return True if opcode code appears in the pickle, else False.
 def opcode_in_pickle(code, pickle):
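
no_tracing is an ordinary decorator: it records the current trace function, runs the test body with tracing disabled, and restores the tracer afterwards, which keeps deep-recursion pickling tests from being slowed down or broken by coverage/debug tracers. A standalone sketch of its effect (illustrative only; assumes the no_tracing decorator defined above is in scope):

    # Illustrative only: tracing is suspended inside the decorated call.
    import sys

    @no_tracing
    def traced_off():
        return sys.gettrace()                       # None while the body runs

    sys.settrace(lambda frame, event, arg: None)    # install a dummy tracer
    print traced_off()                              # None
    print sys.gettrace() is not None                # True: tracer was restored
    sys.settrace(None)
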
@@ -73,6 +91,16 @@
             n += 1
     return n
 
+class UnseekableIO(StringIO.StringIO):
+    def peek(self, *args):
+        raise NotImplementedError
+
+    def seek(self, *args):
+        raise NotImplementedError
+
+    def tell(self):
+        raise NotImplementedError
+
 # We can't very well test the extension registry without putting known stuff
 # in it, but we have to be careful to restore its original state.  Code
 # should do this:
@@ -108,9 +136,37 @@
     def __cmp__(self, other):
         return cmp(self.__dict__, other.__dict__)
 
+class D(C):
+    def __init__(self, arg):
+        pass
+
+class E(C):
+    def __getinitargs__(self):
+        return ()
+
+class H(object):
+    pass
+
+# Hashable mutable key
+class K(object):
+    def __init__(self, value):
+        self.value = value
+
+    def __reduce__(self):
+        # Shouldn't support the recursion itself
+        return K, (self.value,)
+
 import __main__
 __main__.C = C
 C.__module__ = "__main__"
+__main__.D = D
+D.__module__ = "__main__"
+__main__.E = E
+E.__module__ = "__main__"
+__main__.H = H
+H.__module__ = "__main__"
+__main__.K = K
+K.__module__ = "__main__"
 
 class myint(int):
     def __init__(self, x):
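
The new helper classes give the unpickling tests one example of each customization hook: D has an __init__ that requires an argument (so unpickling must rebuild it without calling __init__), E supplies __getinitargs__, H is a plain new-style class, and K routes pickling through __reduce__. A short view of what __reduce__ hands to the pickler (illustrative only; assumes class K from the hunk above):

    # Illustrative only: __reduce__ returns (callable, args) for reconstruction.
    import pickle

    k = K(42)
    print k.__reduce__()                    # (<class K>, (42,)) -> rebuild via K(42)
    clone = pickle.loads(pickle.dumps(k, 2))
    print type(clone).__name__, clone.value           # K 42
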
@@ -420,11 +476,352 @@
     x.append(5)
     return x
 
-class AbstractPickleTests(unittest.TestCase):
-    # Subclass must define self.dumps, self.loads, self.error.
+
+class AbstractUnpickleTests(unittest.TestCase):
+    # Subclass must define self.loads, self.error.
 
     _testdata = create_data()
 
+    def assert_is_copy(self, obj, objcopy, msg=None):
+        """Utility method to verify if two objects are copies of each other.
+        """
+        if msg is None:
+            msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
+        self.assertEqual(obj, objcopy, msg=msg)
+        self.assertIs(type(obj), type(objcopy), msg=msg)
+        if hasattr(obj, '__dict__'):
+            self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
+            self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
+        if hasattr(obj, '__slots__'):
+            self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
+            for slot in obj.__slots__:
+                self.assertEqual(
+                    hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
+                self.assertEqual(getattr(obj, slot, None),
+                                 getattr(objcopy, slot, None), msg=msg)
+
+    def check_unpickling_error(self, errors, data):
+        try:
+            try:
+                self.loads(data)
+            except:
+                if support.verbose > 1:
+                    exc_type, exc, tb = sys.exc_info()
+                    print '%-32r - %s: %s' % (data, exc_type.__name__, exc)
+                raise
+        except errors:
+            pass
+        else:
+            try:
+                exc_name = errors.__name__
+            except AttributeError:
+                exc_name = str(errors)
+            raise self.failureException('%s not raised' % exc_name)
+
+    def test_load_from_canned_string(self):
+        expected = self._testdata
+        for canned in DATA0, DATA1, DATA2:
+            got = self.loads(canned)
+            self.assert_is_copy(expected, got)
+
+    def test_garyp(self):
+        self.check_unpickling_error(self.error, 'garyp')
+
+    def test_maxint64(self):
+        maxint64 = (1L << 63) - 1
+        data = 'I' + str(maxint64) + '\n.'
+        got = self.loads(data)
+        self.assertEqual(got, maxint64)
+
+        # Try too with a bogus literal.
+        data = 'I' + str(maxint64) + 'JUNK\n.'
+        self.check_unpickling_error(ValueError, data)
+
+    def test_insecure_strings(self):
+        insecure = ["abc", "2 + 2", # not quoted
+                    #"'abc' + 'def'", # not a single quoted string
+                    "'abc", # quote is not closed
+                    "'abc\"", # open quote and close quote don't match
+                    "'abc'   ?", # junk after close quote
+                    "'\\'", # trailing backslash
+                    # issue #17710
+                    "'", '"',
+                    "' ", '" ',
+                    '\'"', '"\'',
+                    " ''", ' ""',
+                    ' ',
+                    # some tests of the quoting rules
+                    #"'abc\"\''",
+                    #"'\\\\a\'\'\'\\\'\\\\\''",
+                    ]
+        for s in insecure:
+            buf = "S" + s + "\n."
+            self.check_unpickling_error(ValueError, buf)
+
+    def test_correctly_quoted_string(self):
+        goodpickles = [("S''\n.", ''),
+                       ('S""\n.', ''),
+                       ('S"\\n"\n.', '\n'),
+                       ("S'\\n'\n.", '\n')]
+        for p, expected in goodpickles:
+            self.assertEqual(self.loads(p), expected)
+
+    def test_load_classic_instance(self):
+        # See issue5180.  Test loading 2.x pickles that
+        # contain an instance of old style class.
+        for X, args in [(C, ()), (D, ('x',)), (E, ())]:
+            xname = X.__name__.encode('ascii')
+            # Protocol 0 (text mode pickle):
+            """
+             0: (    MARK
+             1: i        INST       '__main__ X' (MARK at 0)
+            13: p    PUT        0
+            16: (    MARK
+            17: d        DICT       (MARK at 16)
+            18: p    PUT        1
+            21: b    BUILD
+            22: .    STOP
+            """
+            pickle0 = ("(i__main__\n"
+                       "X\n"
+                       "p0\n"
+                       "(dp1\nb.").replace('X', xname)
+            self.assert_is_copy(X(*args), self.loads(pickle0))
+
+            # Protocol 1 (binary mode pickle)
+            """
+             0: (    MARK
+             1: c        GLOBAL     '__main__ X'
+            13: q        BINPUT     0
+            15: o        OBJ        (MARK at 0)
+            16: q    BINPUT     1
+            18: }    EMPTY_DICT
+            19: q    BINPUT     2
+            21: b    BUILD
+            22: .    STOP
+            """
+            pickle1 = ('(c__main__\n'
+                       'X\n'
+                       'q\x00oq\x01}q\x02b.').replace('X', xname)
+            self.assert_is_copy(X(*args), self.loads(pickle1))
+
+            # Protocol 2 (pickle2 = '\x80\x02' + pickle1)
+            """
+             0: \x80 PROTO      2
+             2: (    MARK
+             3: c        GLOBAL     '__main__ X'
+            15: q        BINPUT     0
+            17: o        OBJ        (MARK at 2)
+            18: q    BINPUT     1
+            20: }    EMPTY_DICT
+            21: q    BINPUT     2
+            23: b    BUILD
+            24: .    STOP
+            """
+            pickle2 = ('\x80\x02(c__main__\n'
+                       'X\n'
+                       'q\x00oq\x01}q\x02b.').replace('X', xname)
+            self.assert_is_copy(X(*args), self.loads(pickle2))
+
+    def test_load_str(self):
+        # From Python 2: pickle.dumps('a\x00\xa0', protocol=0)
+        self.assertEqual(self.loads("S'a\\x00\\xa0'\n."), 'a\x00\xa0')
+        # From Python 2: pickle.dumps('a\x00\xa0', protocol=1)
+        self.assertEqual(self.loads('U\x03a\x00\xa0.'), 'a\x00\xa0')
+        # From Python 2: pickle.dumps('a\x00\xa0', protocol=2)
+        self.assertEqual(self.loads('\x80\x02U\x03a\x00\xa0.'), 'a\x00\xa0')
+
+    def test_load_unicode(self):
+        # From Python 2: pickle.dumps(u'π', protocol=0)
+        self.assertEqual(self.loads('V\\u03c0\n.'), u'π')
+        # From Python 2: pickle.dumps(u'π', protocol=1)
+        self.assertEqual(self.loads('X\x02\x00\x00\x00\xcf\x80.'), u'π')
+        # From Python 2: pickle.dumps(u'π', protocol=2)
+        self.assertEqual(self.loads('\x80\x02X\x02\x00\x00\x00\xcf\x80.'), u'π')
+
+    def test_constants(self):
+        self.assertIsNone(self.loads('N.'))
+        self.assertIs(self.loads('\x88.'), True)
+        self.assertIs(self.loads('\x89.'), False)
+        self.assertIs(self.loads('I01\n.'), True)
+        self.assertIs(self.loads('I00\n.'), False)
+
+    def test_misc_get(self):
+        self.check_unpickling_error(self.error, 'g0\np0\n')
+        self.check_unpickling_error(self.error, 'h\x00q\x00')
+
+    def test_get(self):
+        pickled = '((lp100000\ng100000\nt.'
+        unpickled = self.loads(pickled)
+        self.assertEqual(unpickled, ([],)*2)
+        self.assertIs(unpickled[0], unpickled[1])
+
+    def test_binget(self):
+        pickled = '(]q\xffh\xfft.'
+        unpickled = self.loads(pickled)
+        self.assertEqual(unpickled, ([],)*2)
+        self.assertIs(unpickled[0], unpickled[1])
+
+    def test_long_binget(self):
+        pickled = '(]r\x00\x00\x01\x00j\x00\x00\x01\x00t.'
+        unpickled = self.loads(pickled)
+        self.assertEqual(unpickled, ([],)*2)
+        self.assertIs(unpickled[0], unpickled[1])
+
+    def test_dup(self):
+        pickled = '((l2t.'
+        unpickled = self.loads(pickled)
+        self.assertEqual(unpickled, ([],)*2)
+        self.assertIs(unpickled[0], unpickled[1])
+
+    def test_bad_stack(self):
+        badpickles = [
+            '.',                        # STOP
+            '0',                        # POP
+            '1',                        # POP_MARK
+            '2',                        # DUP
+            # '(2',                     # PyUnpickler doesn't raise
+            'R',                        # REDUCE
+            ')R',
+            'a',                        # APPEND
+            'Na',
+            'b',                        # BUILD
+            'Nb',
+            'd',                        # DICT
+            'e',                        # APPENDS
+            # '(e',                     # PyUnpickler raises AttributeError
+            'i__builtin__\nlist\n',     # INST
+            'l',                        # LIST
+            'o',                        # OBJ
+            '(o',
+            'p1\n',                     # PUT
+            'q\x00',                    # BINPUT
+            'r\x00\x00\x00\x00',        # LONG_BINPUT
+            's',                        # SETITEM
+            'Ns',
+            'NNs',
+            't',                        # TUPLE
+            'u',                        # SETITEMS
+            # '(u',                     # PyUnpickler doesn't raise
+            '}(Nu',
+            '\x81',                     # NEWOBJ
+            ')\x81',
+            '\x85',                     # TUPLE1
+            '\x86',                     # TUPLE2
+            'N\x86',
+            '\x87',                     # TUPLE3
+            'N\x87',
+            'NN\x87',
+        ]
+        for p in badpickles:
+            self.check_unpickling_error(self.bad_stack_errors, p)
+
+    def test_bad_mark(self):
+        badpickles = [
+            # 'N(.',                      # STOP
+            'N(2',                      # DUP
+            'c__builtin__\nlist\n)(R',  # REDUCE
+            'c__builtin__\nlist\n()R',
+            ']N(a',                     # APPEND
+                                        # BUILD
+            'c__builtin__\nValueError\n)R}(b',
+            'c__builtin__\nValueError\n)R(}b',
+            '(Nd',                      # DICT
+            'N(p1\n',                   # PUT
+            'N(q\x00',                  # BINPUT
+            'N(r\x00\x00\x00\x00',      # LONG_BINPUT
+            '}NN(s',                    # SETITEM
+            '}N(Ns',
+            '}(NNs',
+            '}((u',                     # SETITEMS
+                                        # NEWOBJ
+            'c__builtin__\nlist\n)(\x81',
+            'c__builtin__\nlist\n()\x81',
+            'N(\x85',                   # TUPLE1
+            'NN(\x86',                  # TUPLE2
+            'N(N\x86',
+            'NNN(\x87',                 # TUPLE3
+            'NN(N\x87',
+            'N(NN\x87',
+        ]
+        for p in badpickles:
+            self.check_unpickling_error(self.bad_mark_errors, p)
+
+    def test_truncated_data(self):
+        self.check_unpickling_error(EOFError, '')
+        self.check_unpickling_error(EOFError, 'N')
+        badpickles = [
+            'F',                        # FLOAT
+            'F0.0',
+            'F0.00',
+            'G',                        # BINFLOAT
+            'G\x00\x00\x00\x00\x00\x00\x00',
+            'I',                        # INT
+            'I0',
+            'J',                        # BININT
+            'J\x00\x00\x00',
+            'K',                        # BININT1
+            'L',                        # LONG
+            'L0',
+            'L10',
+            'L0L',
+            'L10L',
+            'M',                        # BININT2
+            'M\x00',
+            # 'P',                        # PERSID
+            # 'Pabc',
+            'S',                        # STRING
+            "S'abc'",
+            'T',                        # BINSTRING
+            'T\x03\x00\x00',
+            'T\x03\x00\x00\x00',
+            'T\x03\x00\x00\x00ab',
+            'U',                        # SHORT_BINSTRING
+            'U\x03',
+            'U\x03ab',
+            'V',                        # UNICODE
+            'Vabc',
+            'X',                        # BINUNICODE
+            'X\x03\x00\x00',
+            'X\x03\x00\x00\x00',
+            'X\x03\x00\x00\x00ab',
+            '(c',                       # GLOBAL
+            '(c__builtin__',
+            '(c__builtin__\n',
+            '(c__builtin__\nlist',
+            'Ng',                       # GET
+            'Ng0',
+            '(i',                       # INST
+            '(i__builtin__',
+            '(i__builtin__\n',
+            '(i__builtin__\nlist',
+            'Nh',                       # BINGET
+            'Nj',                       # LONG_BINGET
+            'Nj\x00\x00\x00',
+            'Np',                       # PUT
+            'Np0',
+            'Nq',                       # BINPUT
+            'Nr',                       # LONG_BINPUT
+            'Nr\x00\x00\x00',
+            '\x80',                     # PROTO
+            '\x82',                     # EXT1
+            '\x83',                     # EXT2
+            '\x84\x01',
+            '\x84',                     # EXT4
+            '\x84\x01\x00\x00',
+            '\x8a',                     # LONG1
+            '\x8b',                     # LONG4
+            '\x8b\x00\x00\x00',
+        ]
+        for p in badpickles:
+            self.check_unpickling_error(self.truncated_errors, p)
+
+
+class AbstractPickleTests(unittest.TestCase):
+    # Subclass must define self.dumps, self.loads.
+
+    _testdata = AbstractUnpickleTests._testdata
+
     def setUp(self):
         pass
 
@@ -455,12 +852,6 @@
             got = self.loads(s)
             self.assertEqual(expected, got)
 
-    def test_load_from_canned_string(self):
-        expected = self._testdata
-        for canned in DATA0, DATA1, DATA2:
-            got = self.loads(canned)
-            self.assertEqual(expected, got)
-
     # There are gratuitous differences between pickles produced by
     # pickle and cPickle, largely because cPickle starts PUT indices at
     # 1 and pickle starts them at 0.  See XXX comment in cPickle's put2() --
@@ -483,18 +874,21 @@
         for proto in protocols:
             s = self.dumps(l, proto)
             x = self.loads(s)
+            self.assertIsInstance(x, list)
             self.assertEqual(len(x), 1)
-            self.assertTrue(x is x[0])
+            self.assertIs(x[0], x)
 
-    def test_recursive_tuple(self):
+    def test_recursive_tuple_and_list(self):
         t = ([],)
         t[0].append(t)
         for proto in protocols:
             s = self.dumps(t, proto)
             x = self.loads(s)
+            self.assertIsInstance(x, tuple)
             self.assertEqual(len(x), 1)
+            self.assertIsInstance(x[0], list)
             self.assertEqual(len(x[0]), 1)
-            self.assertTrue(x is x[0][0])
+            self.assertIs(x[0][0], x)
 
     def test_recursive_dict(self):
         d = {}
@@ -502,8 +896,50 @@
         for proto in protocols:
             s = self.dumps(d, proto)
             x = self.loads(s)
+            self.assertIsInstance(x, dict)
             self.assertEqual(x.keys(), [1])
-            self.assertTrue(x[1] is x)
+            self.assertIs(x[1], x)
+
+    def test_recursive_dict_key(self):
+        d = {}
+        k = K(d)
+        d[k] = 1
+        for proto in protocols:
+            s = self.dumps(d, proto)
+            x = self.loads(s)
+            self.assertIsInstance(x, dict)
+            self.assertEqual(len(x.keys()), 1)
+            self.assertIsInstance(x.keys()[0], K)
+            self.assertIs(x.keys()[0].value, x)
+
+    def test_recursive_list_subclass(self):
+        y = MyList()
+        y.append(y)
+        s = self.dumps(y, 2)
+        x = self.loads(s)
+        self.assertIsInstance(x, MyList)
+        self.assertEqual(len(x), 1)
+        self.assertIs(x[0], x)
+
+    def test_recursive_dict_subclass(self):
+        d = MyDict()
+        d[1] = d
+        s = self.dumps(d, 2)
+        x = self.loads(s)
+        self.assertIsInstance(x, MyDict)
+        self.assertEqual(x.keys(), [1])
+        self.assertIs(x[1], x)
+
+    def test_recursive_dict_subclass_key(self):
+        d = MyDict()
+        k = K(d)
+        d[k] = 1
+        s = self.dumps(d, 2)
+        x = self.loads(s)
+        self.assertIsInstance(x, MyDict)
+        self.assertEqual(len(x.keys()), 1)
+        self.assertIsInstance(x.keys()[0], K)
+        self.assertIs(x.keys()[0].value, x)
 
     def test_recursive_inst(self):
         i = C()
@@ -511,6 +947,7 @@
         for proto in protocols:
             s = self.dumps(i, proto)
             x = self.loads(s)
+            self.assertIsInstance(x, C)
             self.assertEqual(dir(x), dir(i))
             self.assertIs(x.attr, x)
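
Illustrative sketch (not part of the diff): the recursive-structure assertions above all reduce to round trips like this one, where the self-reference must come back as the same object:

    import pickle
    l = []
    l.append(l)                           # self-referential list
    x = pickle.loads(pickle.dumps(l, 2))
    assert x[0] is x                      # identity of the cycle survives
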
 
@@ -523,35 +960,54 @@
         for proto in protocols:
             s = self.dumps(l, proto)
             x = self.loads(s)
+            self.assertIsInstance(x, list)
             self.assertEqual(len(x), 1)
             self.assertEqual(dir(x[0]), dir(i))
             self.assertEqual(x[0].attr.keys(), [1])
             self.assertTrue(x[0].attr[1] is x)
 
-    def test_garyp(self):
-        self.assertRaises(self.error, self.loads, 'garyp')
+    def check_recursive_collection_and_inst(self, factory):
+        h = H()
+        y = factory([h])
+        h.attr = y
+        for proto in protocols:
+            s = self.dumps(y, proto)
+            x = self.loads(s)
+            self.assertIsInstance(x, type(y))
+            self.assertEqual(len(x), 1)
+            self.assertIsInstance(list(x)[0], H)
+            self.assertIs(list(x)[0].attr, x)
 
-    def test_insecure_strings(self):
-        insecure = ["abc", "2 + 2", # not quoted
-                    #"'abc' + 'def'", # not a single quoted string
-                    "'abc", # quote is not closed
-                    "'abc\"", # open quote and close quote don't match
-                    "'abc'   ?", # junk after close quote
-                    "'\\'", # trailing backslash
-                    "'",    # issue #17710
-                    "' ",   # issue #17710
-                    # some tests of the quoting rules
-                    #"'abc\"\''",
-                    #"'\\\\a\'\'\'\\\'\\\\\''",
-                    ]
-        for s in insecure:
-            buf = "S" + s + "\012p0\012."
-            self.assertRaises(ValueError, self.loads, buf)
+    def test_recursive_list_and_inst(self):
+        self.check_recursive_collection_and_inst(list)
+
+    def test_recursive_tuple_and_inst(self):
+        self.check_recursive_collection_and_inst(tuple)
+
+    def test_recursive_dict_and_inst(self):
+        self.check_recursive_collection_and_inst(dict.fromkeys)
+
+    def test_recursive_set_and_inst(self):
+        self.check_recursive_collection_and_inst(set)
+
+    def test_recursive_frozenset_and_inst(self):
+        self.check_recursive_collection_and_inst(frozenset)
+
+    def test_recursive_list_subclass_and_inst(self):
+        self.check_recursive_collection_and_inst(MyList)
+
+    def test_recursive_tuple_subclass_and_inst(self):
+        self.check_recursive_collection_and_inst(MyTuple)
+
+    def test_recursive_dict_subclass_and_inst(self):
+        self.check_recursive_collection_and_inst(MyDict.fromkeys)
 
     if have_unicode:
         def test_unicode(self):
             endcases = [u'', u'<\\u>', u'<\\\u1234>', u'<\n>',
-                        u'<\\>', u'<\\\U00012345>']
+                        u'<\\>', u'<\\\U00012345>',
+                        # surrogates
+                        u'<\udc80>']
             for proto in protocols:
                 for u in endcases:
                     p = self.dumps(u, proto)
@@ -576,16 +1032,6 @@
                     self.assertEqual(expected, n2)
                 n = n >> 1
 
-    def test_maxint64(self):
-        maxint64 = (1L << 63) - 1
-        data = 'I' + str(maxint64) + '\n.'
-        got = self.loads(data)
-        self.assertEqual(got, maxint64)
-
-        # Try too with a bogus literal.
-        data = 'I' + str(maxint64) + 'JUNK\n.'
-        self.assertRaises(ValueError, self.loads, data)
-
     def test_long(self):
         for proto in protocols:
             # 256 bytes is where LONG4 begins.
@@ -640,6 +1086,7 @@
             s = self.dumps(a, proto)
             b = self.loads(s)
             self.assertEqual(a, b)
+            self.assertIs(a.__class__, b.__class__)
 
     def test_structseq(self):
         import time
@@ -785,6 +1232,26 @@
                 self.assertEqual(B(x), B(y), detail)
                 self.assertEqual(x.__dict__, y.__dict__, detail)
 
+    def test_newobj_proxies(self):
+        # NEWOBJ should use the __class__ rather than the raw type
+        import weakref
+        classes = myclasses[:]
+        # Cannot create weakproxies to these classes
+        for c in (MyInt, MyLong, MyStr, MyTuple):
+            classes.remove(c)
+        for proto in protocols:
+            for C in classes:
+                B = C.__base__
+                x = C(C.sample)
+                x.foo = 42
+                p = weakref.proxy(x)
+                s = self.dumps(p, proto)
+                y = self.loads(s)
+                self.assertEqual(type(y), type(x))  # rather than type(p)
+                detail = (proto, C, B, x, y, type(y))
+                self.assertEqual(B(x), B(y), detail)
+                self.assertEqual(x.__dict__, y.__dict__, detail)
+
     # Register a type with copy_reg, with extension code extcode.  Pickle
     # an object of that type.  Check that the resulting pickle uses opcode
     # (EXT[124]) under proto 2, and not in proto 1.
@@ -879,10 +1346,30 @@
                 self.assertTrue(num_setitems >= 2)
 
     def test_simple_newobj(self):
-        x = object.__new__(SimpleNewObj)  # avoid __init__
+        x = SimpleNewObj.__new__(SimpleNewObj, 0xface)  # avoid __init__
         x.abc = 666
         for proto in protocols:
             s = self.dumps(x, proto)
+            if proto < 1:
+                self.assertIn('\nI64206', s)  # INT
+            else:
+                self.assertIn('M\xce\xfa', s)  # BININT2
+            self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s), proto >= 2)
+            y = self.loads(s)   # will raise TypeError if __init__ called
+            self.assertEqual(y.abc, 666)
+            self.assertEqual(x.__dict__, y.__dict__)
+
+    def test_complex_newobj(self):
+        x = ComplexNewObj.__new__(ComplexNewObj, 0xface)  # avoid __init__
+        x.abc = 666
+        for proto in protocols:
+            s = self.dumps(x, proto)
+            if proto < 1:
+                self.assertIn('\nI64206', s)  # INT
+            elif proto < 2:
+                self.assertIn('M\xce\xfa', s)  # BININT2
+            else:
+                self.assertIn('U\x04FACE', s)  # SHORT_BINSTRING
             self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s), proto >= 2)
             y = self.loads(s)   # will raise TypeError if __init__ called
             self.assertEqual(y.abc, 666)
@@ -944,6 +1431,13 @@
             y = self.loads(s)
             self.assertEqual(y._reduce_called, 1)
 
+    @no_tracing
+    def test_bad_getattr(self):
+        # Issue #3514: crash when there is an infinite loop in __getattr__
+        x = BadGetattr()
+        for proto in protocols:
+            self.assertRaises(RuntimeError, self.dumps, x, proto)
+
     def test_reduce_bad_iterator(self):
         # Issue4176: crash when 4th and 5th items of __reduce__()
         # are not iterators
@@ -956,15 +1450,16 @@
                 # 5th item is not an iterator
                 return dict, (), None, None, []
 
-        # Protocol 0 is less strict and also accept iterables.
+        # Protocol 0 in Python implementation is less strict and also accepts
+        # iterables.
         for proto in protocols:
             try:
                 self.dumps(C(), proto)
-            except (AttributeError, pickle.PickleError, cPickle.PickleError):
+            except (AttributeError, pickle.PicklingError, cPickle.PicklingError):
                 pass
             try:
                 self.dumps(D(), proto)
-            except (AttributeError, pickle.PickleError, cPickle.PickleError):
+            except (AttributeError, pickle.PicklingError, cPickle.PicklingError):
                 pass
 
     def test_many_puts_and_gets(self):
@@ -995,6 +1490,39 @@
             for x_key, y_key in zip(x_keys, y_keys):
                 self.assertIs(x_key, y_key)
 
+    def test_large_pickles(self):
+        # Test the correctness of internal buffering routines when handling
+        # large data.
+        for proto in protocols:
+            data = (1, min, 'xy' * (30 * 1024), len)
+            dumped = self.dumps(data, proto)
+            loaded = self.loads(dumped)
+            self.assertEqual(len(loaded), len(data))
+            self.assertEqual(loaded, data)
+
+    def _check_pickling_with_opcode(self, obj, opcode, proto):
+        pickled = self.dumps(obj, proto)
+        self.assertTrue(opcode_in_pickle(opcode, pickled))
+        unpickled = self.loads(pickled)
+        self.assertEqual(obj, unpickled)
+
+    def test_appends_on_non_lists(self):
+        # Issue #17720
+        obj = REX_six([1, 2, 3])
+        for proto in protocols:
+            if proto == 0:
+                self._check_pickling_with_opcode(obj, pickle.APPEND, proto)
+            else:
+                self._check_pickling_with_opcode(obj, pickle.APPENDS, proto)
+
+    def test_setitems_on_non_dicts(self):
+        obj = REX_seven({1: -1, 2: -2, 3: -3})
+        for proto in protocols:
+            if proto == 0:
+                self._check_pickling_with_opcode(obj, pickle.SETITEM, proto)
+            else:
+                self._check_pickling_with_opcode(obj, pickle.SETITEMS, proto)
+
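
For orientation (an illustrative sketch, reusing pickletools the same way opcode_in_pickle does above): protocol 0 appends list items one at a time with APPEND, while later protocols batch them with APPENDS, which is what the new non-list/non-dict opcode checks key on:

    import pickle, pickletools

    def has_opcode(code, data):
        # True if the given opcode occurs anywhere in the pickle stream
        return any(op.code == code for op, arg, pos in pickletools.genops(data))

    assert has_opcode(pickle.APPEND, pickle.dumps([1, 2, 3], 0))
    assert has_opcode(pickle.APPENDS, pickle.dumps([1, 2, 3], 2))
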
 
 # Test classes for reduce_ex
 
@@ -1034,6 +1562,39 @@
         return object.__reduce__(self)
     # This one used to fail with infinite recursion
 
+class REX_six(object):
+    """This class is used to check the 4th argument (list iterator) of
+    the reduce protocol.
+    """
+    def __init__(self, items=None):
+        if items is None:
+            items = []
+        self.items = items
+    def __eq__(self, other):
+        return type(self) is type(other) and self.items == other.items
+    def append(self, item):
+        self.items.append(item)
+    def extend(self, items):
+        for item in items:
+            self.append(item)
+    def __reduce__(self):
+        return type(self), (), None, iter(self.items), None
+
+class REX_seven(object):
+    """This class is used to check the 5th argument (dict iterator) of
+    the reduce protocol.
+    """
+    def __init__(self, table=None):
+        if table is None:
+            table = {}
+        self.table = table
+    def __eq__(self, other):
+        return type(self) is type(other) and self.table == other.table
+    def __setitem__(self, key, value):
+        self.table[key] = value
+    def __reduce__(self):
+        return type(self), (), None, None, iter(self.table.items())
+
 # Test classes for newobj
 
 class MyInt(int):
@@ -1072,10 +1633,20 @@
 class SlotList(MyList):
     __slots__ = ["foo"]
 
-class SimpleNewObj(object):
-    def __init__(self, a, b, c):
+class SimpleNewObj(int):
+    def __init__(self, *args, **kwargs):
         # raise an error, to make sure this isn't called
         raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
+    def __eq__(self, other):
+        return int(self) == int(other) and self.__dict__ == other.__dict__
+
+class ComplexNewObj(SimpleNewObj):
+    def __getnewargs__(self):
+        return ('%X' % self, 16)
+
+class BadGetattr:
+    def __getattr__(self, key):
+        self.foo
 
 class AbstractPickleModuleTests(unittest.TestCase):
 
@@ -1136,11 +1707,7 @@
         # Test issue4298
         s = '\x58\0\0\0\x54'
         self.assertRaises(EOFError, self.module.loads, s)
-        # Test issue7455
-        s = '0'
-        # XXX Why doesn't pickle raise UnpicklingError?
-        self.assertRaises((IndexError, cPickle.UnpicklingError),
-                          self.module.loads, s)
+
 
 class AbstractPersistentPicklerTests(unittest.TestCase):
 
@@ -1294,6 +1861,46 @@
         f.seek(0)
         self.assertEqual(unpickler.load(), data2)
 
+    def _check_multiple_unpicklings(self, ioclass, seekable):
+        for proto in protocols:
+            data1 = [(x, str(x)) for x in xrange(2000)] + ["abcde", len]
+            f = ioclass()
+            pickler = self.pickler_class(f, protocol=proto)
+            pickler.dump(data1)
+            pickled = f.getvalue()
+
+            N = 5
+            f = ioclass(pickled * N)
+            unpickler = self.unpickler_class(f)
+            for i in xrange(N):
+                if seekable:
+                    pos = f.tell()
+                self.assertEqual(unpickler.load(), data1)
+                if seekable:
+                    self.assertEqual(f.tell(), pos + len(pickled))
+            self.assertRaises(EOFError, unpickler.load)
+
+    def test_multiple_unpicklings_seekable(self):
+        self._check_multiple_unpicklings(StringIO.StringIO, True)
+
+    def test_multiple_unpicklings_unseekable(self):
+        self._check_multiple_unpicklings(UnseekableIO, False)
+
+    def test_unpickling_buffering_readline(self):
+        # Issue #12687: the unpickler's buffering logic could fail with
+        # text mode opcodes.
+        import io
+        data = list(xrange(10))
+        for proto in protocols:
+            for buf_size in xrange(1, 11):
+                f = io.BufferedRandom(io.BytesIO(), buffer_size=buf_size)
+                pickler = self.pickler_class(f, protocol=proto)
+                pickler.dump(data)
+                f.seek(0)
+                unpickler = self.unpickler_class(f)
+                self.assertEqual(unpickler.load(), data)
+
+
 class BigmemPickleTests(unittest.TestCase):
 
     # Memory requirements: 1 byte per character for input strings, 1 byte
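
A minimal sketch (not part of the patch) of the stream behaviour the new multiple-unpickling and truncation tests pin down: consecutive loads from one stream succeed, then EOFError is raised once the stream is exhausted:

    import cPickle, StringIO
    buf = StringIO.StringIO(cPickle.dumps('spam', 2) + cPickle.dumps('eggs', 2))
    unpickler = cPickle.Unpickler(buf)
    assert unpickler.load() == 'spam'
    assert unpickler.load() == 'eggs'
    try:
        unpickler.load()                  # stream exhausted
    except EOFError:
        pass
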
diff --git a/lib/python2.7/test/regrtest.py b/lib/python2.7/test/regrtest.py
index 3a8ad91..b6883b5 100755
--- a/lib/python2.7/test/regrtest.py
+++ b/lib/python2.7/test/regrtest.py
@@ -57,11 +57,12 @@
 -t/--threshold THRESHOLD
                 -- call gc.set_threshold(THRESHOLD)
 -F/--forever    -- run the specified tests in a loop, until an error happens
+-P/--pgo        -- enable Profile Guided Optimization training
 
 
 Additional Option Details:
 
--r randomizes test execution order. You can use --randseed=int to provide a
+-r randomizes test execution order. You can use --randseed=int to provide an
 int seed value for the randomizer; this is useful for reproducing troublesome
 test orders.
 
@@ -203,6 +204,15 @@
         newsoft = min(hard, max(soft, 1024*2048))
         resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
 
+# Windows, Tkinter, and resetting the environment after each test don't
+# mix well.  To alleviate test failures due to Tcl/Tk not being able to
+# find its library, get the necessary environment massage done once early.
+if sys.platform == 'win32':
+    try:
+        import FixTk
+    except Exception:
+        pass
+
 # Test result constants.
 PASSED = 1
 FAILED = 0
@@ -231,7 +241,7 @@
          findleaks=False, use_resources=None, trace=False, coverdir='coverage',
          runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
          random_seed=None, use_mp=None, verbose3=False, forever=False,
-         header=False):
+         header=False, pgo=False):
     """Execute a test suite.
 
     This also parses command-line options and modifies its behavior
@@ -257,12 +267,12 @@
 
     test_support.record_original_stdout(sys.stdout)
     try:
-        opts, args = getopt.getopt(sys.argv[1:], 'hvqxsSrf:lu:t:TD:NLR:FwWM:j:',
+        opts, args = getopt.getopt(sys.argv[1:], 'hvqxsSrf:lu:t:TD:NLR:FwWM:j:P',
             ['help', 'verbose', 'verbose2', 'verbose3', 'quiet',
              'exclude', 'single', 'slow', 'randomize', 'fromfile=', 'findleaks',
              'use=', 'threshold=', 'trace', 'coverdir=', 'nocoverdir',
              'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=',
-             'multiprocess=', 'slaveargs=', 'forever', 'header'])
+             'multiprocess=', 'slaveargs=', 'forever', 'header', 'pgo'])
     except getopt.error, msg:
         usage(2, msg)
 
@@ -357,6 +367,8 @@
             print   # Force a newline (just in case)
             print json.dumps(result)
             sys.exit(0)
+        elif o in ('-P', '--pgo'):
+            pgo = True
         else:
             print >>sys.stderr, ("No handler for option {}.  Please "
                 "report this as a bug at http://bugs.python.org.").format(o)
@@ -422,13 +434,14 @@
 
     # For a partial run, we do not need to clutter the output.
     if verbose or header or not (quiet or single or tests or args):
-        # Print basic platform information
-        print "==", platform.python_implementation(), \
-                    " ".join(sys.version.split())
-        print "==  ", platform.platform(aliased=True), \
-                      "%s-endian" % sys.byteorder
-        print "==  ", os.getcwd()
-        print "Testing with flags:", sys.flags
+        if not pgo:
+            # Print basic platform information
+            print "==", platform.python_implementation(), \
+                        " ".join(sys.version.split())
+            print "==  ", platform.platform(aliased=True), \
+                          "%s-endian" % sys.byteorder
+            print "==  ", os.getcwd()
+            print "Testing with flags:", sys.flags
 
     alltests = findtests(testdir, stdtests, nottests)
     selected = tests or args or alltests
@@ -449,7 +462,7 @@
 
     test_times = []
     test_support.use_resources = use_resources
-    save_modules = sys.modules.keys()
+    save_modules = set(sys.modules)
 
     def accumulate_result(test, result):
         ok, test_time = result
@@ -459,7 +472,6 @@
         elif ok == FAILED:
             bad.append(test)
         elif ok == ENV_CHANGED:
-            bad.append(test)
             environment_changed.append(test)
         elif ok == SKIPPED:
             skipped.append(test)
@@ -496,12 +508,16 @@
             for test in tests:
                 args_tuple = (
                     (test, verbose, quiet),
-                    dict(huntrleaks=huntrleaks, use_resources=use_resources)
+                    dict(huntrleaks=huntrleaks, use_resources=use_resources,
+                         pgo=pgo)
                 )
                 yield (test, args_tuple)
         pending = tests_and_args()
         opt_args = test_support.args_from_interpreter_flags()
         base_cmd = [sys.executable] + opt_args + ['-m', 'test.regrtest']
+        # required to spawn a new process with PGO flag on/off
+        if pgo:
+            base_cmd = base_cmd + ['--pgo']
         def work():
             # A worker thread.
             try:
@@ -542,7 +558,7 @@
                     continue
                 if stdout:
                     print stdout
-                if stderr:
+                if stderr and not pgo:
                     print >>sys.stderr, stderr
                 sys.stdout.flush()
                 sys.stderr.flush()
@@ -575,11 +591,12 @@
                               globals=globals(), locals=vars())
             else:
                 try:
-                    result = runtest(test, verbose, quiet, huntrleaks)
+                    result = runtest(test, verbose, quiet, huntrleaks, None, pgo)
                     accumulate_result(test, result)
                     if verbose3 and result[0] == FAILED:
-                        print "Re-running test %r in verbose mode" % test
-                        runtest(test, True, quiet, huntrleaks)
+                        if not pgo:
+                            print "Re-running test %r in verbose mode" % test
+                        runtest(test, True, quiet, huntrleaks, None, pgo)
                 except KeyboardInterrupt:
                     interrupted = True
                     break
@@ -599,14 +616,14 @@
                 if module not in save_modules and module.startswith("test."):
                     test_support.unload(module)
 
-    if interrupted:
+    if interrupted and not pgo:
         # print a newline after ^C
         print
         print "Test suite interrupted by signal SIGINT."
         omitted = set(selected) - set(good) - set(bad) - set(skipped)
         print count(len(omitted), "test"), "omitted:"
         printlist(omitted)
-    if good and not quiet:
+    if good and not quiet and not pgo:
         if not bad and not skipped and not interrupted and len(good) > 1:
             print "All",
         print count(len(good), "test"), "OK."
@@ -615,16 +632,14 @@
         print "10 slowest tests:"
         for time, test in test_times[:10]:
             print "%s: %.1fs" % (test, time)
-    if bad:
-        bad = set(bad) - set(environment_changed)
-        if bad:
-            print count(len(bad), "test"), "failed:"
-            printlist(bad)
-        if environment_changed:
-            print "{} altered the execution environment:".format(
-                count(len(environment_changed), "test"))
-            printlist(environment_changed)
-    if skipped and not quiet:
+    if bad and not pgo:
+        print count(len(bad), "test"), "failed:"
+        printlist(bad)
+    if environment_changed and not pgo:
+        print "{} altered the execution environment:".format(
+            count(len(environment_changed), "test"))
+        printlist(environment_changed)
+    if skipped and not quiet and not pgo:
         print count(len(skipped), "test"), "skipped:"
         printlist(skipped)
 
@@ -644,18 +659,23 @@
 
     if verbose2 and bad:
         print "Re-running failed tests in verbose mode"
-        for test in bad:
+        for test in bad[:]:
             print "Re-running test %r in verbose mode" % test
             sys.stdout.flush()
             try:
                 test_support.verbose = True
-                ok = runtest(test, True, quiet, huntrleaks)
+                ok = runtest(test, True, quiet, huntrleaks, None, pgo)
             except KeyboardInterrupt:
                 # print a newline separate from the ^C
                 print
                 break
-            except:
-                raise
+            else:
+                if ok[0] in {PASSED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED}:
+                    bad.remove(test)
+        else:
+            if bad:
+                print count(len(bad), "test"), "failed again:"
+                printlist(bad)
 
     if single:
         if next_single_test:
@@ -705,7 +725,7 @@
     return stdtests + sorted(tests)
 
 def runtest(test, verbose, quiet,
-            huntrleaks=False, use_resources=None):
+            huntrleaks=False, use_resources=None, pgo=False):
     """Run a single test.
 
     test -- the name of the test
@@ -714,6 +734,9 @@
     test_times -- a list of (time, test_name) pairs
     huntrleaks -- run multiple times to test for leaks; requires a debug
                   build; a triple corresponding to -R's three arguments
+    pgo -- if true, do not print unnecessary info when running the test
+           for a Profile Guided Optimization build
+
     Returns one of the test result constants:
         INTERRUPTED      KeyboardInterrupt when run under -j
         RESOURCE_DENIED  test skipped because resource denied
@@ -727,7 +750,7 @@
     if use_resources is not None:
         test_support.use_resources = use_resources
     try:
-        return runtest_inner(test, verbose, quiet, huntrleaks)
+        return runtest_inner(test, verbose, quiet, huntrleaks, pgo)
     finally:
         cleanup_test_droppings(test, verbose)
 
@@ -756,10 +779,11 @@
 
     changed = False
 
-    def __init__(self, testname, verbose=0, quiet=False):
+    def __init__(self, testname, verbose=0, quiet=False, pgo=False):
         self.testname = testname
         self.verbose = verbose
         self.quiet = quiet
+        self.pgo = pgo
 
     # To add things to save and restore, add a name XXX to the resources list
     # and add corresponding get_XXX/restore_XXX functions.  get_XXX should
@@ -873,11 +897,11 @@
             if current != original:
                 self.changed = True
                 restore(original)
-                if not self.quiet:
+                if not self.quiet and not self.pgo:
                     print >>sys.stderr, (
                           "Warning -- {} was modified by {}".format(
                                                  name, self.testname))
-                    if self.verbose > 1:
+                    if self.verbose > 1 and not self.pgo:
                         print >>sys.stderr, (
                               "  Before: {}\n  After:  {} ".format(
                                                   original, current))
@@ -888,7 +912,7 @@
         return False
 
 
-def runtest_inner(test, verbose, quiet, huntrleaks=False):
+def runtest_inner(test, verbose, quiet, huntrleaks=False, pgo=False):
     test_support.unload(test)
     if verbose:
         capture_stdout = None
@@ -907,7 +931,7 @@
             else:
                 # Always import it from the test package
                 abstest = 'test.' + test
-            with saved_test_environment(test, verbose, quiet) as environment:
+            with saved_test_environment(test, verbose, quiet, pgo) as environment:
                 start_time = time.time()
                 the_package = __import__(abstest, globals(), locals(), [])
                 the_module = getattr(the_package, test)
@@ -924,26 +948,28 @@
         finally:
             sys.stdout = save_stdout
     except test_support.ResourceDenied, msg:
-        if not quiet:
+        if not quiet and not pgo:
             print test, "skipped --", msg
             sys.stdout.flush()
         return RESOURCE_DENIED, test_time
     except unittest.SkipTest, msg:
-        if not quiet:
+        if not quiet and not pgo:
             print test, "skipped --", msg
             sys.stdout.flush()
         return SKIPPED, test_time
     except KeyboardInterrupt:
         raise
     except test_support.TestFailed, msg:
-        print >>sys.stderr, "test", test, "failed --", msg
+        if not pgo:
+            print >>sys.stderr, "test", test, "failed --", msg
         sys.stderr.flush()
         return FAILED, test_time
     except:
         type, value = sys.exc_info()[:2]
-        print >>sys.stderr, "test", test, "crashed --", str(type) + ":", value
+        if not pgo:
+            print >>sys.stderr, "test", test, "crashed --", str(type) + ":", value
         sys.stderr.flush()
-        if verbose:
+        if verbose and not pgo:
             traceback.print_exc(file=sys.stderr)
             sys.stderr.flush()
         return FAILED, test_time
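
Hypothetical usage of the new flag (the test name below is only an example): a PGO training run drives regrtest quietly through the option this patch adds:

    import subprocess, sys
    # '--pgo' suppresses the platform banner and per-test chatter during training;
    # the return code is nonzero if the chosen test fails.
    subprocess.call([sys.executable, '-m', 'test.regrtest', '--pgo', 'test_json'])
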
diff --git a/lib/python2.7/test/script_helper.py b/lib/python2.7/test/script_helper.py
index 7f7c70e..6be47bd 100644
--- a/lib/python2.7/test/script_helper.py
+++ b/lib/python2.7/test/script_helper.py
@@ -134,9 +134,9 @@
     #    zip_file.close()
     return zip_name, os.path.join(zip_name, name_in_zip)
 
-def make_pkg(pkg_dir):
+def make_pkg(pkg_dir, init_source=''):
     os.mkdir(pkg_dir)
-    make_script(pkg_dir, '__init__', '')
+    make_script(pkg_dir, '__init__', init_source)
 
 def make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
                  source, depth=1, compiled=False):
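
A small sketch (paths and source below are illustrative) of what the new init_source parameter enables: packages created by the helper can now carry a non-empty __init__.py:

    import os, tempfile
    from test.script_helper import make_pkg

    pkg_dir = os.path.join(tempfile.mkdtemp(), 'pkg')
    make_pkg(pkg_dir, init_source='initialized = True')
    # the package's __init__.py now contains the given source
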
diff --git a/lib/python2.7/test/selfsigned_pythontestdotnet.pem b/lib/python2.7/test/selfsigned_pythontestdotnet.pem
index 9a80073..b6d259b 100644
--- a/lib/python2.7/test/selfsigned_pythontestdotnet.pem
+++ b/lib/python2.7/test/selfsigned_pythontestdotnet.pem
@@ -1,5 +1,5 @@
 -----BEGIN CERTIFICATE-----
-MIIChzCCAfCgAwIBAgIJAKGU95wKR8pSMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV
+MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV
 BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u
 IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv
 bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG
@@ -8,9 +8,9 @@
 aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ
 Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm
 Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv
-EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjKTAnMCUGA1UdEQQeMByCGnNl
-bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MA0GCSqGSIb3DQEBBQUAA4GBAIOXmdtM
-eG9qzP9TiXW/Gc/zI4cBfdCpC+Y4gOfC9bQUC7hefix4iO3+iZjgy3X/FaRxUUoV
-HKiXcXIaWqTSUWp45cSh0MbwZXudp6JIAptzdAhvvCrPKeC9i9GvxsPD4LtDAL97
-vSaxQBezA7hdxZd90/EeyMgVZgAnTCnvAWX9
+EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl
+bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN
+AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h
+TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515
+C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM=
 -----END CERTIFICATE-----
diff --git a/lib/python2.7/test/seq_tests.py b/lib/python2.7/test/seq_tests.py
index f5e4e0e..f4673d4 100644
--- a/lib/python2.7/test/seq_tests.py
+++ b/lib/python2.7/test/seq_tests.py
@@ -84,6 +84,14 @@
     'Test multiple tiers of iterators'
     return chain(imap(lambda x:x, iterfunc(IterGen(Sequence(seqn)))))
 
+class LyingTuple(tuple):
+    def __iter__(self):
+        yield 1
+
+class LyingList(list):
+    def __iter__(self):
+        yield 1
+
 class CommonTest(unittest.TestCase):
     # The type to be tested
     type2test = None
@@ -130,6 +138,10 @@
             self.assertRaises(TypeError, self.type2test, IterNoNext(s))
             self.assertRaises(ZeroDivisionError, self.type2test, IterGenExc(s))
 
+        # Issue #23757
+        self.assertEqual(self.type2test(LyingTuple((2,))), self.type2test((1,)))
+        self.assertEqual(self.type2test(LyingList([2])), self.type2test([1]))
+
     def test_truth(self):
         self.assertFalse(self.type2test())
         self.assertTrue(self.type2test([42]))
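
Illustrative of the issue #23757 behaviour that the LyingTuple/LyingList cases document: in the updated interpreter the list and tuple constructors honour a subclass's __iter__ instead of copying the underlying storage:

    class LyingList(list):
        def __iter__(self):
            yield 1

    assert list(LyingList([2])) == [1]
    assert tuple(LyingList([2])) == (1,)
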
diff --git a/lib/python2.7/test/string_tests.py b/lib/python2.7/test/string_tests.py
index 6d87eb6..b2f837b 100644
--- a/lib/python2.7/test/string_tests.py
+++ b/lib/python2.7/test/string_tests.py
@@ -1295,8 +1295,10 @@
                   ('hex', '68656c6c6f20776f726c64'),
                   ('uu', 'begin 666 <data>\n+:&5L;&\\@=V]R;&0 \n \nend\n')]
         for encoding, data in codecs:
-            self.checkequal(data, 'hello world', 'encode', encoding)
-            self.checkequal('hello world', data, 'decode', encoding)
+            with test_support.check_py3k_warnings():
+                self.checkequal(data, 'hello world', 'encode', encoding)
+            with test_support.check_py3k_warnings():
+                self.checkequal('hello world', data, 'decode', encoding)
         # zlib is optional, so we make the test optional too...
         try:
             import zlib
@@ -1304,8 +1306,10 @@
             pass
         else:
             data = 'x\x9c\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\x01\x00\x1a\x0b\x04]'
-            self.checkequal(data, 'hello world', 'encode', 'zlib')
-            self.checkequal('hello world', data, 'decode', 'zlib')
+            with test_support.check_py3k_warnings():
+                self.checkequal(data, 'hello world', 'encode', 'zlib')
+            with test_support.check_py3k_warnings():
+                self.checkequal('hello world', data, 'decode', 'zlib')
 
         self.checkraises(TypeError, 'xyz', 'decode', 42)
         self.checkraises(TypeError, 'xyz', 'encode', 42)
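
For context (a sketch, assuming the suite is also run with -3, where these codec-style calls emit Py3k warnings): the check_py3k_warnings() wrapper simply absorbs the warning around an otherwise unchanged assertion:

    from test import test_support
    with test_support.check_py3k_warnings():
        assert 'hello world'.encode('hex') == '68656c6c6f20776f726c64'
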
diff --git a/lib/python2.7/test/test__locale.py b/lib/python2.7/test/test__locale.py
index 88f2c44..576642d 100644
--- a/lib/python2.7/test/test__locale.py
+++ b/lib/python2.7/test/test__locale.py
@@ -23,7 +23,8 @@
     'de_DE', 'sr_YU', 'br_FR', 'nl_BE', 'sv_FI', 'pl_PL', 'fr_CA', 'fo_FO',
     'bs_BA', 'fr_LU', 'kl_GL', 'fa_IR', 'de_BE', 'sv_SE', 'it_CH', 'uk_UA',
     'eu_ES', 'vi_VN', 'af_ZA', 'nb_NO', 'en_DK', 'tg_TJ', 'ps_AF.UTF-8', 'en_US',
-    'es_ES.ISO8859-1', 'fr_FR.ISO8859-15', 'ru_RU.KOI8-R', 'ko_KR.eucKR']
+    'fr_FR.ISO8859-1', 'fr_FR.UTF-8', 'fr_FR.ISO8859-15@euro',
+    'ru_RU.KOI8-R', 'ko_KR.eucKR']
 
 # Workaround for MSVC6(debug) crash bug
 if "MSC v.1200" in sys.version:
@@ -37,8 +38,10 @@
 # value is not known, use '' .
 known_numerics = {
     'en_US': ('.', ','),
-    'fr_FR' : (',', ' '),
     'de_DE' : (',', '.'),
+    # The French thousands separator may be a breaking or non-breaking space
+    # depending on the platform, so do not test it
+    'fr_FR' : (',', ''),
     'ps_AF.UTF-8' : ('\xd9\xab', '\xd9\xac'),
 }
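
A hedged sketch of why the fr_FR separator is now left unchecked: the thousands separator reported by the C library varies by platform (plain vs. non-breaking space), so only the call itself is worth probing:

    import locale
    try:
        locale.setlocale(locale.LC_NUMERIC, 'fr_FR')
    except locale.Error:
        pass                              # locale not installed on this machine
    else:
        # may come back as ' ', '\xa0', '\xc2\xa0' or '' depending on the platform
        sep = locale.localeconv()['thousands_sep']
        assert isinstance(sep, str)
        locale.setlocale(locale.LC_NUMERIC, 'C')
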
 
diff --git a/lib/python2.7/test/test_array.py b/lib/python2.7/test/test_array.py
index b933cbf..9f5c09d 100644
--- a/lib/python2.7/test/test_array.py
+++ b/lib/python2.7/test/test_array.py
@@ -18,7 +18,9 @@
         array.array.__init__(self, typecode)
 
 tests = [] # list to accumulate all tests
-typecodes = "cubBhHiIlLfd"
+typecodes = "cbBhHiIlLfd"
+if test_support.have_unicode:
+    typecodes += "u"
 
 class BadConstructorTest(unittest.TestCase):
 
@@ -26,7 +28,17 @@
         self.assertRaises(TypeError, array.array)
         self.assertRaises(TypeError, array.array, spam=42)
         self.assertRaises(TypeError, array.array, 'xx')
+        self.assertRaises(TypeError, array.array, '')
+        self.assertRaises(TypeError, array.array, 1)
         self.assertRaises(ValueError, array.array, 'x')
+        self.assertRaises(ValueError, array.array, '\x80')
+
+    @test_support.requires_unicode
+    def test_unicode_constructor(self):
+        self.assertRaises(TypeError, array.array, u'xx')
+        self.assertRaises(TypeError, array.array, u'')
+        self.assertRaises(ValueError, array.array, u'x')
+        self.assertRaises(ValueError, array.array, u'\x80')
 
 tests.append(BadConstructorTest)
 
@@ -235,6 +247,7 @@
         self.assertRaises(TypeError, a.tostring, 42)
         self.assertRaises(TypeError, b.fromstring)
         self.assertRaises(TypeError, b.fromstring, 42)
+        self.assertRaises(ValueError, a.fromstring, a)
         b.fromstring(a.tostring())
         self.assertEqual(a, b)
         if a.itemsize>1:
@@ -827,6 +840,7 @@
         self.assertEqual(s.color, "red")
         self.assertEqual(s.__dict__.keys(), ["color"])
 
+    @test_support.requires_unicode
     def test_nounicode(self):
         a = array.array(self.typecode, self.example)
         self.assertRaises(ValueError, a.fromunicode, unicode(''))
@@ -1039,6 +1053,17 @@
     minitemsize = 4
 tests.append(UnsignedLongTest)
 
+
+@test_support.requires_unicode
+class UnicodeTypecodeTest(unittest.TestCase):
+    def test_unicode_typecode(self):
+        for typecode in typecodes:
+            a = array.array(unicode(typecode))
+            self.assertEqual(a.typecode, typecode)
+            self.assertIs(type(a.typecode), str)
+tests.append(UnicodeTypecodeTest)
+
+
 class FPTest(NumberTest):
     example = [-42.0, 0, 42, 1e5, -1e10]
     smallerexample = [-42.0, 0, 42, 1e5, -2e10]
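
Illustrative of what the new UnicodeTypecodeTest asserts: with the updated interpreter a unicode typecode is accepted but normalised back to a plain str:

    import array
    a = array.array(u'i', [1, 2, 3])
    assert a.typecode == 'i'
    assert type(a.typecode) is str
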
diff --git a/lib/python2.7/test/test_audioop.py b/lib/python2.7/test/test_audioop.py
index e5a5159..4af7350 100644
--- a/lib/python2.7/test/test_audioop.py
+++ b/lib/python2.7/test/test_audioop.py
@@ -210,6 +210,21 @@
             self.assertEqual(audioop.lin2adpcm(b'\0' * w * 10, w, None),
                              (b'\0' * 5, (0, 0)))
 
+    def test_invalid_adpcm_state(self):
+        # state must be a tuple or None, not an integer
+        self.assertRaises(TypeError, audioop.adpcm2lin, b'\0', 1, 555)
+        self.assertRaises(TypeError, audioop.lin2adpcm, b'\0', 1, 555)
+        # Issues #24456, #24457: index out of range
+        self.assertRaises(ValueError, audioop.adpcm2lin, b'\0', 1, (0, -1))
+        self.assertRaises(ValueError, audioop.adpcm2lin, b'\0', 1, (0, 89))
+        self.assertRaises(ValueError, audioop.lin2adpcm, b'\0', 1, (0, -1))
+        self.assertRaises(ValueError, audioop.lin2adpcm, b'\0', 1, (0, 89))
+        # value out of range
+        self.assertRaises(ValueError, audioop.adpcm2lin, b'\0', 1, (-0x8001, 0))
+        self.assertRaises(ValueError, audioop.adpcm2lin, b'\0', 1, (0x8000, 0))
+        self.assertRaises(ValueError, audioop.lin2adpcm, b'\0', 1, (-0x8001, 0))
+        self.assertRaises(ValueError, audioop.lin2adpcm, b'\0', 1, (0x8000, 0))
+
     def test_lin2alaw(self):
         self.assertEqual(audioop.lin2alaw(datas[1], 1),
                          b'\xd5\x87\xa4\x24\xaa\x2a\x5a')
@@ -280,6 +295,9 @@
                              (b'', (-2, ((0, 0),))))
             self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None)[0],
                              datas[w])
+            self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None, 1, 0)[0],
+                             datas[w])
+
         state = None
         d1, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
         d2, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
@@ -295,6 +313,18 @@
             self.assertEqual(d, d0)
             self.assertEqual(state, state0)
 
+        expected = {
+            1: packs[1](0, 0x0d, 0x37, -0x26, 0x55, -0x4b, -0x14),
+            2: packs[2](0, 0x0da7, 0x3777, -0x2630, 0x5673, -0x4a64, -0x129a),
+            4: packs[4](0, 0x0da740da, 0x37777776, -0x262fc962,
+                        0x56740da6, -0x4a62fc96, -0x1298bf26),
+        }
+        for w in 1, 2, 4:
+            self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None, 3, 1)[0],
+                             expected[w])
+            self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None, 30, 10)[0],
+                             expected[w])
+
     def test_reverse(self):
         for w in 1, 2, 4:
             self.assertEqual(audioop.reverse(b'', w), b'')
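
A minimal illustration (mirroring the new checks, not part of the patch) of the stricter ADPCM state validation: a step index outside 0..88 is now rejected with ValueError:

    import audioop
    try:
        audioop.adpcm2lin('\0', 1, (0, 89))   # step index must be in range(89)
    except ValueError:
        pass
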
diff --git a/lib/python2.7/test/test_base64.py b/lib/python2.7/test/test_base64.py
index 3f2cee4..6e67dc0 100644
--- a/lib/python2.7/test/test_base64.py
+++ b/lib/python2.7/test/test_base64.py
@@ -137,9 +137,30 @@
         # Non-bytes
         eq(base64.urlsafe_b64decode(bytearray('01a-b_cd')), '\xd3V\xbeo\xf7\x1d')
 
-    def test_b64decode_error(self):
+    def test_b64decode_padding_error(self):
         self.assertRaises(TypeError, base64.b64decode, 'abc')
 
+    def test_b64decode_invalid_chars(self):
+        # issue 1466065: Test some invalid characters.
+        tests = ((b'%3d==', b'\xdd'),
+                 (b'$3d==', b'\xdd'),
+                 (b'[==', b''),
+                 (b'YW]3=', b'am'),
+                 (b'3{d==', b'\xdd'),
+                 (b'3d}==', b'\xdd'),
+                 (b'@@', b''),
+                 (b'!', b''),
+                 (b'YWJj\nYWI=', b'abcab'))
+        for bstr, res in tests:
+            self.assertEqual(base64.b64decode(bstr), res)
+            self.assertEqual(base64.standard_b64decode(bstr), res)
+            self.assertEqual(base64.urlsafe_b64decode(bstr), res)
+
+        # Normal alphabet characters not discarded when alternative given
+        res = b'\xFB\xEF\xBE\xFF\xFF\xFF'
+        self.assertEqual(base64.b64decode(b'++[[//]]', b'[]'), res)
+        self.assertEqual(base64.urlsafe_b64decode(b'++--//__'), res)
+
     def test_b32encode(self):
         eq = self.assertEqual
         eq(base64.b32encode(''), '')
@@ -206,6 +227,10 @@
         eq(base64.b16decode('0102abcdef', True), '\x01\x02\xab\xcd\xef')
         # Non-bytes
         eq(base64.b16decode(bytearray("0102ABCDEF")), '\x01\x02\xab\xcd\xef')
+        # Non-alphabet characters
+        self.assertRaises(TypeError, base64.b16decode, '0102AG')
+        # Incorrect "padding"
+        self.assertRaises(TypeError, base64.b16decode, '010')
 
 
 
diff --git a/lib/python2.7/test/test_buffer.py b/lib/python2.7/test/test_buffer.py
index a02c5f7..de80d44 100644
--- a/lib/python2.7/test/test_buffer.py
+++ b/lib/python2.7/test/test_buffer.py
@@ -4,6 +4,8 @@
 
 """
 
+import copy
+import pickle
 import sys
 import unittest
 from test import test_support
@@ -35,6 +37,18 @@
         buf = buffer(data, sys.maxsize, sys.maxsize)
         self.assertEqual(buf[:4096], "")
 
+    def test_copy(self):
+        buf = buffer(b'abc')
+        with self.assertRaises(TypeError):
+            copy.copy(buf)
+
+    # See issue #22995
+    ## def test_pickle(self):
+    ##     buf = buffer(b'abc')
+    ##     for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+    ##         with self.assertRaises(TypeError):
+    ##             pickle.dumps(buf, proto)
+
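
Illustrative only: the new test_copy boils down to this check that 2.x buffer objects refuse copying (pickling remains open as issue #22995):

    import copy
    buf = buffer('abc')
    try:
        copy.copy(buf)
    except TypeError:
        pass
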
 
 def test_main():
     with test_support.check_py3k_warnings(("buffer.. not supported",
diff --git a/lib/python2.7/test/test_bufio.py b/lib/python2.7/test/test_bufio.py
index 108b1e1..d6b12e3 100644
--- a/lib/python2.7/test/test_bufio.py
+++ b/lib/python2.7/test/test_bufio.py
@@ -34,7 +34,7 @@
             line = f.readline()
             self.assertEqual(line, s)
             line = f.readline()
-            self.assertTrue(not line) # Must be at EOF
+            self.assertFalse(line) # Must be at EOF
             f.close()
         finally:
             support.unlink(support.TESTFN)
diff --git a/lib/python2.7/test/test_builtin.py b/lib/python2.7/test/test_builtin.py
index 15581d9..c9347e9 100644
--- a/lib/python2.7/test/test_builtin.py
+++ b/lib/python2.7/test/test_builtin.py
@@ -1682,6 +1682,134 @@
         data = 'The quick Brown fox Jumped over The lazy Dog'.split()
         self.assertRaises(TypeError, sorted, data, None, lambda x,y: 0)
 
+
+class TestType(unittest.TestCase):
+    def test_new_type(self):
+        A = type('A', (), {})
+        self.assertEqual(A.__name__, 'A')
+        self.assertEqual(A.__module__, __name__)
+        self.assertEqual(A.__bases__, (object,))
+        self.assertIs(A.__base__, object)
+        x = A()
+        self.assertIs(type(x), A)
+        self.assertIs(x.__class__, A)
+
+        class B:
+            def ham(self):
+                return 'ham%d' % self
+        C = type('C', (B, int), {'spam': lambda self: 'spam%s' % self})
+        self.assertEqual(C.__name__, 'C')
+        self.assertEqual(C.__module__, __name__)
+        self.assertEqual(C.__bases__, (B, int))
+        self.assertIs(C.__base__, int)
+        self.assertIn('spam', C.__dict__)
+        self.assertNotIn('ham', C.__dict__)
+        x = C(42)
+        self.assertEqual(x, 42)
+        self.assertIs(type(x), C)
+        self.assertIs(x.__class__, C)
+        self.assertEqual(x.ham(), 'ham42')
+        self.assertEqual(x.spam(), 'spam42')
+        self.assertEqual(x.bit_length(), 6)
+
+    def test_type_new_keywords(self):
+        class B:
+            def ham(self):
+                return 'ham%d' % self
+        C = type.__new__(type,
+                         name='C',
+                         bases=(B, int),
+                         dict={'spam': lambda self: 'spam%s' % self})
+        self.assertEqual(C.__name__, 'C')
+        self.assertEqual(C.__module__, __name__)
+        self.assertEqual(C.__bases__, (B, int))
+        self.assertIs(C.__base__, int)
+        self.assertIn('spam', C.__dict__)
+        self.assertNotIn('ham', C.__dict__)
+
+    def test_type_name(self):
+        for name in 'A', '\xc4', 'B.A', '42', '':
+            A = type(name, (), {})
+            self.assertEqual(A.__name__, name)
+            self.assertEqual(A.__module__, __name__)
+        with self.assertRaises(ValueError):
+            type('A\x00B', (), {})
+        with self.assertRaises(TypeError):
+            type(u'A', (), {})
+
+        C = type('C', (), {})
+        for name in 'A', '\xc4', 'B.A', '42', '':
+            C.__name__ = name
+            self.assertEqual(C.__name__, name)
+            self.assertEqual(C.__module__, __name__)
+
+        A = type('C', (), {})
+        with self.assertRaises(ValueError):
+            A.__name__ = 'A\x00B'
+        self.assertEqual(A.__name__, 'C')
+        with self.assertRaises(TypeError):
+            A.__name__ = u'A'
+        self.assertEqual(A.__name__, 'C')
+
+    def test_type_doc(self):
+        tests = ('x', '\xc4', 'x\x00y', 42, None)
+        if have_unicode:
+            tests += (u'\xc4', u'x\x00y')
+        for doc in tests:
+            A = type('A', (), {'__doc__': doc})
+            self.assertEqual(A.__doc__, doc)
+
+        A = type('A', (), {})
+        self.assertEqual(A.__doc__, None)
+        with self.assertRaises(AttributeError):
+            A.__doc__ = 'x'
+
+    def test_bad_args(self):
+        with self.assertRaises(TypeError):
+            type()
+        with self.assertRaises(TypeError):
+            type('A', ())
+        with self.assertRaises(TypeError):
+            type('A', (), {}, ())
+        with self.assertRaises(TypeError):
+            type('A', (), dict={})
+        with self.assertRaises(TypeError):
+            type('A', [], {})
+        with self.assertRaises(TypeError):
+            type('A', (), UserDict.UserDict())
+        with self.assertRaises(TypeError):
+            type('A', (None,), {})
+        with self.assertRaises(TypeError):
+            type('A', (bool,), {})
+        with self.assertRaises(TypeError):
+            type('A', (int, str), {})
+        class B:
+            pass
+        with self.assertRaises(TypeError):
+            type('A', (B,), {})
+
+    def test_bad_slots(self):
+        with self.assertRaises(TypeError):
+            type('A', (long,), {'__slots__': 'x'})
+        with self.assertRaises(TypeError):
+            type('A', (), {'__slots__': ''})
+        with self.assertRaises(TypeError):
+            type('A', (), {'__slots__': '42'})
+        with self.assertRaises(TypeError):
+            type('A', (), {'__slots__': 'x\x00y'})
+        with self.assertRaises(TypeError):
+            type('A', (), {'__slots__': ('__dict__', '__dict__')})
+        with self.assertRaises(TypeError):
+            type('A', (), {'__slots__': ('__weakref__', '__weakref__')})
+
+        class B(object):
+            pass
+        with self.assertRaises(TypeError):
+            type('A', (B,), {'__slots__': '__dict__'})
+        with self.assertRaises(TypeError):
+            type('A', (B,), {'__slots__': '__weakref__'})
+
+
 def _run_unittest(*args):
     with check_py3k_warnings(
             (".+ not supported in 3.x", DeprecationWarning),
@@ -1696,7 +1824,7 @@
                 (".+ not supported in 3.x", DeprecationWarning)):
             run_unittest(TestExecFile)
     numruns += 1
-    test_classes = (BuiltinTest, TestSorted)
+    test_classes = (BuiltinTest, TestSorted, TestType)
 
     _run_unittest(*test_classes)
 
diff --git a/lib/python2.7/test/test_bytes.py b/lib/python2.7/test/test_bytes.py
index 988b931..02fba38 100644
--- a/lib/python2.7/test/test_bytes.py
+++ b/lib/python2.7/test/test_bytes.py
@@ -722,10 +722,27 @@
         for i in range(100):
             b += b"x"
             alloc = b.__alloc__()
-            self.assertTrue(alloc >= len(b))
+            self.assertGreater(alloc, len(b))  # including trailing null byte
             if alloc not in seq:
                 seq.append(alloc)
 
+    def test_init_alloc(self):
+        b = bytearray()
+        def g():
+            for i in range(1, 100):
+                yield i
+                a = list(b)
+                self.assertEqual(a, list(range(1, len(a)+1)))
+                self.assertEqual(len(b), len(a))
+                self.assertLessEqual(len(b), i)
+                alloc = b.__alloc__()
+                self.assertGreater(alloc, len(b))  # including trailing null byte
+        b.__init__(g())
+        self.assertEqual(list(b), list(range(1, 100)))
+        self.assertEqual(len(b), 99)
+        alloc = b.__alloc__()
+        self.assertGreater(alloc, len(b))
+
     def test_extend(self):
         orig = b'hello'
         a = bytearray(orig)
diff --git a/lib/python2.7/test/test_calendar.py b/lib/python2.7/test/test_calendar.py
index 5692642..46c4a6f 100644
--- a/lib/python2.7/test/test_calendar.py
+++ b/lib/python2.7/test/test_calendar.py
@@ -513,8 +513,8 @@
     def test_option_encoding(self):
         self.assertFailure('-e')
         self.assertFailure('--encoding')
-        stdout = self.run_ok('--encoding', 'rot-13', '2004')
-        self.assertEqual(stdout.strip(), conv(result_2004_text.encode('rot-13')).strip())
+        stdout = self.run_ok('--encoding', 'utf-16-le', '2004')
+        self.assertEqual(stdout.strip(), conv(result_2004_text.encode('utf-16-le')).strip())
 
     def test_option_locale(self):
         self.assertFailure('-L')
diff --git a/lib/python2.7/test/test_cmath.py b/lib/python2.7/test/test_cmath.py
index 5d10261..c1d0327 100644
--- a/lib/python2.7/test/test_cmath.py
+++ b/lib/python2.7/test/test_cmath.py
@@ -1,4 +1,4 @@
-from test.test_support import run_unittest
+from test.test_support import run_unittest, cpython_only
 from test.test_math import parse_testfile, test_file
 import unittest
 import cmath, math
@@ -351,17 +351,48 @@
             self.rAssertAlmostEqual(expected.imag, actual.imag,
                                         msg=error_message)
 
-    def assertCISEqual(self, a, b):
-        eps = 1E-7
-        if abs(a[0] - b[0]) > eps or abs(a[1] - b[1]) > eps:
-            self.fail((a ,b))
+    def check_polar(self, func):
+        def check(arg, expected):
+            got = func(arg)
+            for e, g in zip(expected, got):
+                self.rAssertAlmostEqual(e, g)
+        check(0, (0., 0.))
+        check(1, (1., 0.))
+        check(-1, (1., pi))
+        check(1j, (1., pi / 2))
+        check(-3j, (3., -pi / 2))
+        inf = float('inf')
+        check(complex(inf, 0), (inf, 0.))
+        check(complex(-inf, 0), (inf, pi))
+        check(complex(3, inf), (inf, pi / 2))
+        check(complex(5, -inf), (inf, -pi / 2))
+        check(complex(inf, inf), (inf, pi / 4))
+        check(complex(inf, -inf), (inf, -pi / 4))
+        check(complex(-inf, inf), (inf, 3 * pi / 4))
+        check(complex(-inf, -inf), (inf, -3 * pi / 4))
+        nan = float('nan')
+        check(complex(nan, 0), (nan, nan))
+        check(complex(0, nan), (nan, nan))
+        check(complex(nan, nan), (nan, nan))
+        check(complex(inf, nan), (inf, nan))
+        check(complex(-inf, nan), (inf, nan))
+        check(complex(nan, inf), (inf, nan))
+        check(complex(nan, -inf), (inf, nan))
 
     def test_polar(self):
-        self.assertCISEqual(polar(0), (0., 0.))
-        self.assertCISEqual(polar(1.), (1., 0.))
-        self.assertCISEqual(polar(-1.), (1., pi))
-        self.assertCISEqual(polar(1j), (1., pi/2))
-        self.assertCISEqual(polar(-1j), (1., -pi/2))
+        self.check_polar(polar)
+
+    @cpython_only
+    def test_polar_errno(self):
+        # Issue #24489: check a previously set C errno doesn't disturb polar()
+        from _testcapi import set_errno
+        def polar_with_errno_set(z):
+            set_errno(11)
+            try:
+                return polar(z)
+            finally:
+                set_errno(0)
+        self.check_polar(polar_with_errno_set)
 
     def test_phase(self):
         self.assertAlmostEqual(phase(0), 0.)
diff --git a/lib/python2.7/test/test_cmd_line_script.py b/lib/python2.7/test/test_cmd_line_script.py
index 8b05227..cefa1e9 100644
--- a/lib/python2.7/test/test_cmd_line_script.py
+++ b/lib/python2.7/test/test_cmd_line_script.py
@@ -1,5 +1,6 @@
 # Tests command line execution of scripts
 
+import contextlib
 import unittest
 import os
 import os.path
@@ -207,18 +208,69 @@
             launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
             self._check_import_error(launch_name, msg)
 
+    @contextlib.contextmanager
+    def setup_test_pkg(self, *args):
+        with temp_dir() as script_dir, \
+                test.test_support.change_cwd(script_dir):
+            pkg_dir = os.path.join(script_dir, 'test_pkg')
+            make_pkg(pkg_dir, *args)
+            yield pkg_dir
+
+    def check_dash_m_failure(self, *args):
+        rc, out, err = assert_python_failure('-m', *args)
+        if verbose > 1:
+            print(out)
+        self.assertEqual(rc, 1)
+        return err
+
     def test_dash_m_error_code_is_one(self):
         # If a module is invoked with the -m command line flag
         # and results in an error that the return code to the
         # shell is '1'
-        with temp_dir() as script_dir:
-            pkg_dir = os.path.join(script_dir, 'test_pkg')
-            make_pkg(pkg_dir)
+        with self.setup_test_pkg() as pkg_dir:
             script_name = _make_test_script(pkg_dir, 'other', "if __name__ == '__main__': raise ValueError")
-            rc, out, err = assert_python_failure('-m', 'test_pkg.other', *example_args)
-            if verbose > 1:
-                print(out)
+            err = self.check_dash_m_failure('test_pkg.other', *example_args)
+            self.assertIn(b'ValueError', err)
+
+    def test_dash_m_errors(self):
+        # Exercise error reporting for various invalid package executions
+        tests = (
+            ('__builtin__', br'No code object available'),
+            ('__builtin__.x', br'No module named'),
+            ('__builtin__.x.y', br'No module named'),
+            ('os.path', br'Loader.*cannot handle'),
+            ('importlib', br'No module named.*'
+                br'is a package and cannot be directly executed'),
+            ('importlib.nonexistant', br'No module named'),
+        )
+        for name, regex in tests:
+            rc, _, err = assert_python_failure('-m', name)
             self.assertEqual(rc, 1)
+            self.assertRegexpMatches(err, regex)
+            self.assertNotIn(b'Traceback', err)
+
+    def test_dash_m_init_traceback(self):
+        # These were wrapped in an ImportError and tracebacks were
+        # suppressed; see Issue 14285
+        exceptions = (ImportError, AttributeError, TypeError, ValueError)
+        for exception in exceptions:
+            exception = exception.__name__
+            init = "raise {0}('Exception in __init__.py')".format(exception)
+            with self.setup_test_pkg(init) as pkg_dir:
+                err = self.check_dash_m_failure('test_pkg')
+                self.assertIn(exception.encode('ascii'), err)
+                self.assertIn(b'Exception in __init__.py', err)
+                self.assertIn(b'Traceback', err)
+
+    def test_dash_m_main_traceback(self):
+        # Ensure that an ImportError's traceback is reported
+        with self.setup_test_pkg() as pkg_dir:
+            main = "raise ImportError('Exception in __main__ module')"
+            _make_test_script(pkg_dir, '__main__', main)
+            err = self.check_dash_m_failure('test_pkg')
+            self.assertIn(b'ImportError', err)
+            self.assertIn(b'Exception in __main__ module', err)
+            self.assertIn(b'Traceback', err)
 
 
 def test_main():
diff --git a/lib/python2.7/test/test_codeccallbacks.py b/lib/python2.7/test/test_codeccallbacks.py
index b9cd9c2..c11affd 100644
--- a/lib/python2.7/test/test_codeccallbacks.py
+++ b/lib/python2.7/test/test_codeccallbacks.py
@@ -836,6 +836,26 @@
             text = u'abc<def>ghi'*n
             text.translate(charmap)
 
+    def test_fake_error_class(self):
+        handlers = [
+            codecs.strict_errors,
+            codecs.ignore_errors,
+            codecs.replace_errors,
+            codecs.backslashreplace_errors,
+            codecs.xmlcharrefreplace_errors,
+        ]
+        for cls in UnicodeEncodeError, UnicodeDecodeError, UnicodeTranslateError:
+            class FakeUnicodeError(str):
+                __class__ = cls
+            for handler in handlers:
+                self.assertRaises(TypeError, handler, FakeUnicodeError())
+            class FakeUnicodeError(Exception):
+                __class__ = cls
+            for handler in handlers:
+                with self.assertRaises((TypeError, FakeUnicodeError)):
+                    handler(FakeUnicodeError())
+
+
 def test_main():
     test.test_support.run_unittest(CodecCallbackTest)
 
diff --git a/lib/python2.7/test/test_codecs.py b/lib/python2.7/test/test_codecs.py
index de80b07..57d5e06 100644
--- a/lib/python2.7/test/test_codecs.py
+++ b/lib/python2.7/test/test_codecs.py
@@ -573,9 +573,13 @@
             (b'\x00\xdcA\x00', u'\ufffdA'),
         ]
         for raw, expected in tests:
-            self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode,
-                              raw, 'strict', True)
-            self.assertEqual(raw.decode('utf-16le', 'replace'), expected)
+            try:
+                with self.assertRaises(UnicodeDecodeError):
+                    codecs.utf_16_le_decode(raw, 'strict', True)
+                self.assertEqual(raw.decode('utf-16le', 'replace'), expected)
+            except:
+                print 'raw=%r' % raw
+                raise
 
 class UTF16BETest(ReadTest):
     encoding = "utf-16-be"
@@ -610,9 +614,13 @@
             (b'\xdc\x00\x00A', u'\ufffdA'),
         ]
         for raw, expected in tests:
-            self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode,
-                              raw, 'strict', True)
-            self.assertEqual(raw.decode('utf-16be', 'replace'), expected)
+            try:
+                with self.assertRaises(UnicodeDecodeError):
+                    codecs.utf_16_be_decode(raw, 'strict', True)
+                self.assertEqual(raw.decode('utf-16be', 'replace'), expected)
+            except:
+                print 'raw=%r' % raw
+                raise
 
 class UTF8Test(ReadTest):
     encoding = "utf-8"
@@ -642,6 +650,32 @@
 class UTF7Test(ReadTest):
     encoding = "utf-7"
 
+    def test_ascii(self):
+        # Set D (directly encoded characters)
+        set_d = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+                 'abcdefghijklmnopqrstuvwxyz'
+                 '0123456789'
+                 '\'(),-./:?')
+        self.assertEqual(set_d.encode(self.encoding), set_d)
+        self.assertEqual(set_d.decode(self.encoding), set_d)
+        # Set O (optional direct characters)
+        set_o = ' !"#$%&*;<=>@[]^_`{|}'
+        self.assertEqual(set_o.encode(self.encoding), set_o)
+        self.assertEqual(set_o.decode(self.encoding), set_o)
+        # +
+        self.assertEqual(u'a+b'.encode(self.encoding), 'a+-b')
+        self.assertEqual('a+-b'.decode(self.encoding), u'a+b')
+        # White spaces
+        ws = ' \t\n\r'
+        self.assertEqual(ws.encode(self.encoding), ws)
+        self.assertEqual(ws.decode(self.encoding), ws)
+        # Other ASCII characters
+        other_ascii = ''.join(sorted(set(chr(i) for i in range(0x80)) -
+                                     set(set_d + set_o + '+' + ws)))
+        self.assertEqual(other_ascii.encode(self.encoding),
+                         '+AAAAAQACAAMABAAFAAYABwAIAAsADAAOAA8AEAARABIAEwAU'
+                         'ABUAFgAXABgAGQAaABsAHAAdAB4AHwBcAH4Afw-')
+
     def test_partial(self):
         self.check_partial(
             u"a+-b",
@@ -656,7 +690,9 @@
 
     def test_errors(self):
         tests = [
-            ('a\xffb', u'a\ufffdb'),
+            ('\xe1b', u'\ufffdb'),
+            ('a\xe1b', u'a\ufffdb'),
+            ('a\xe1\xe1b', u'a\ufffd\ufffdb'),
             ('a+IK', u'a\ufffd'),
             ('a+IK-b', u'a\ufffdb'),
             ('a+IK,b', u'a\ufffdb'),
@@ -672,16 +708,55 @@
             ('a+//,+IKw-b', u'a\ufffd\u20acb'),
             ('a+///,+IKw-b', u'a\uffff\ufffd\u20acb'),
             ('a+////,+IKw-b', u'a\uffff\ufffd\u20acb'),
+            ('a+IKw-b\xe1', u'a\u20acb\ufffd'),
+            ('a+IKw\xe1b', u'a\u20ac\ufffdb'),
         ]
         for raw, expected in tests:
-            self.assertRaises(UnicodeDecodeError, codecs.utf_7_decode,
-                              raw, 'strict', True)
-            self.assertEqual(raw.decode('utf-7', 'replace'), expected)
+            try:
+                with self.assertRaises(UnicodeDecodeError):
+                    codecs.utf_7_decode(raw, 'strict', True)
+                self.assertEqual(raw.decode('utf-7', 'replace'), expected)
+            except:
+                print 'raw=%r' % raw
+                raise
 
     def test_nonbmp(self):
         self.assertEqual(u'\U000104A0'.encode(self.encoding), '+2AHcoA-')
         self.assertEqual(u'\ud801\udca0'.encode(self.encoding), '+2AHcoA-')
         self.assertEqual('+2AHcoA-'.decode(self.encoding), u'\U000104A0')
+        self.assertEqual('+2AHcoA'.decode(self.encoding), u'\U000104A0')
+        self.assertEqual(u'\u20ac\U000104A0'.encode(self.encoding), '+IKzYAdyg-')
+        self.assertEqual('+IKzYAdyg-'.decode(self.encoding), u'\u20ac\U000104A0')
+        self.assertEqual('+IKzYAdyg'.decode(self.encoding), u'\u20ac\U000104A0')
+        self.assertEqual(u'\u20ac\u20ac\U000104A0'.encode(self.encoding),
+                         '+IKwgrNgB3KA-')
+        self.assertEqual('+IKwgrNgB3KA-'.decode(self.encoding),
+                         u'\u20ac\u20ac\U000104A0')
+        self.assertEqual('+IKwgrNgB3KA'.decode(self.encoding),
+                         u'\u20ac\u20ac\U000104A0')
+
+    def test_lone_surrogates(self):
+        tests = [
+            ('a+2AE-b', u'a\ud801b'),
+            ('a+2AE\xe1b', u'a\ufffdb'),
+            ('a+2AE', u'a\ufffd'),
+            ('a+2AEA-b', u'a\ufffdb'),
+            ('a+2AH-b', u'a\ufffdb'),
+            ('a+IKzYAQ-b', u'a\u20ac\ud801b'),
+            ('a+IKzYAQ\xe1b', u'a\u20ac\ufffdb'),
+            ('a+IKzYAQA-b', u'a\u20ac\ufffdb'),
+            ('a+IKzYAd-b', u'a\u20ac\ufffdb'),
+            ('a+IKwgrNgB-b', u'a\u20ac\u20ac\ud801b'),
+            ('a+IKwgrNgB\xe1b', u'a\u20ac\u20ac\ufffdb'),
+            ('a+IKwgrNgB', u'a\u20ac\u20ac\ufffd'),
+            ('a+IKwgrNgBA-b', u'a\u20ac\u20ac\ufffdb'),
+        ]
+        for raw, expected in tests:
+            try:
+                self.assertEqual(raw.decode('utf-7', 'replace'), expected)
+            except:
+                print 'raw=%r' % raw
+                raise
 
 class UTF16ExTest(unittest.TestCase):
 
@@ -1395,14 +1470,14 @@
 class Str2StrTest(unittest.TestCase):
 
     def test_read(self):
-        sin = "\x80".encode("base64_codec")
+        sin = codecs.encode("\x80", "base64_codec")
         reader = codecs.getreader("base64_codec")(StringIO.StringIO(sin))
         sout = reader.read()
         self.assertEqual(sout, "\x80")
         self.assertIsInstance(sout, str)
 
     def test_readline(self):
-        sin = "\x80".encode("base64_codec")
+        sin = codecs.encode("\x80", "base64_codec")
         reader = codecs.getreader("base64_codec")(StringIO.StringIO(sin))
         sout = reader.readline()
         self.assertEqual(sout, "\x80")
@@ -1536,6 +1611,9 @@
 ]
 broken_incremental_coders = broken_unicode_with_streams[:]
 
+if sys.flags.py3k_warning:
+    broken_unicode_with_streams.append("rot_13")
+
 # The following encodings only support "strict" mode
 only_strict_mode = [
     "idna",
@@ -2098,6 +2176,21 @@
                 self.assertEqual(f.read(), data * 2)
 
 
+class TransformCodecTest(unittest.TestCase):
+
+    def test_quopri_stateless(self):
+        # Should encode with quotetabs=True
+        encoded = codecs.encode(b"space tab\teol \n", "quopri-codec")
+        self.assertEqual(encoded, b"space=20tab=09eol=20\n")
+        # But should still support unescaped tabs and spaces
+        unescaped = b"space tab eol\n"
+        self.assertEqual(codecs.decode(unescaped, "quopri-codec"), unescaped)
+
+    def test_uu_invalid(self):
+        # Missing "begin" line
+        self.assertRaises(ValueError, codecs.decode, "", "uu-codec")
+
+
 def test_main():
     test_support.run_unittest(
         UTF32Test,
@@ -2129,12 +2222,9 @@
         UnicodeEscapeTest,
         RawUnicodeEscapeTest,
         BomTest,
+        TransformCodecTest,
     )
 
-    def test_uu_invalid(self):
-        # Missing "begin" line
-        self.assertRaises(ValueError, codecs.decode, "", "uu-codec")
-
 
 if __name__ == "__main__":
     test_main()
diff --git a/lib/python2.7/test/test_collections.py b/lib/python2.7/test/test_collections.py
index cd27227..125a68f 100644
--- a/lib/python2.7/test/test_collections.py
+++ b/lib/python2.7/test/test_collections.py
@@ -1,19 +1,24 @@
-
-import unittest, doctest, operator
-import inspect
-from test import test_support
-from collections import namedtuple, Counter, OrderedDict
-from test import mapping_tests
-import pickle, cPickle, copy
-from random import randrange, shuffle
+import collections
+import copy
+import doctest
 import keyword
+import operator
+import pickle
+import cPickle
+from random import choice, randrange
 import re
+import string
 import sys
+from test import test_support
+import unittest
+
+from collections import namedtuple, Counter, OrderedDict
 from collections import Hashable, Iterable, Iterator
 from collections import Sized, Container, Callable
 from collections import Set, MutableSet
 from collections import Mapping, MutableMapping
 from collections import Sequence, MutableSequence
+
 # Silence deprecation warning
 sets = test_support.import_module('sets', deprecated=True)
 
@@ -178,8 +183,7 @@
         self.assertEqual(Dot(1)._fields, ('d',))
 
         n = 5000
-        import string, random
-        names = list(set(''.join([random.choice(string.ascii_letters)
+        names = list(set(''.join([choice(string.ascii_letters)
                                   for j in range(10)]) for i in range(n)))
         n = len(names)
         Big = namedtuple('Big', names)
@@ -556,7 +560,7 @@
 
     def test_issue_4920(self):
         # MutableSet.pop() method did not work
-        class MySet(collections.MutableSet):
+        class MySet(MutableSet):
             __slots__=['__s']
             def __init__(self,items=None):
                 if items is None:
@@ -802,7 +806,7 @@
             self.assertTrue(issubclass(sample, Mapping))
         self.validate_abstract_methods(Mapping, '__contains__', '__iter__', '__len__',
             '__getitem__')
-        class MyMapping(collections.Mapping):
+        class MyMapping(Mapping):
             def __len__(self):
                 return 0
             def __getitem__(self, i):
@@ -1038,286 +1042,11 @@
         self.assertRaises(TypeError, Counter().subtract, {}, {})
         self.assertRaises(TypeError, Counter.subtract)
 
-class TestOrderedDict(unittest.TestCase):
-
-    def test_init(self):
-        with self.assertRaises(TypeError):
-            OrderedDict([('a', 1), ('b', 2)], None)                                 # too many args
-        pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
-        self.assertEqual(sorted(OrderedDict(dict(pairs)).items()), pairs)           # dict input
-        self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()), pairs)         # kwds input
-        self.assertEqual(list(OrderedDict(pairs).items()), pairs)                   # pairs input
-        self.assertEqual(list(OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)],
-                                          c=3, e=5).items()), pairs)                # mixed input
-
-        # make sure no positional args conflict with possible kwdargs
-        self.assertEqual(list(OrderedDict(self=42).items()), [('self', 42)])
-        self.assertEqual(list(OrderedDict(other=42).items()), [('other', 42)])
-        self.assertRaises(TypeError, OrderedDict, 42)
-        self.assertRaises(TypeError, OrderedDict, (), ())
-        self.assertRaises(TypeError, OrderedDict.__init__)
-
-        # Make sure that direct calls to __init__ do not clear previous contents
-        d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
-        d.__init__([('e', 5), ('f', 6)], g=7, d=4)
-        self.assertEqual(list(d.items()),
-            [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
-
-    def test_update(self):
-        with self.assertRaises(TypeError):
-            OrderedDict().update([('a', 1), ('b', 2)], None)                        # too many args
-        pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
-        od = OrderedDict()
-        od.update(dict(pairs))
-        self.assertEqual(sorted(od.items()), pairs)                                 # dict input
-        od = OrderedDict()
-        od.update(**dict(pairs))
-        self.assertEqual(sorted(od.items()), pairs)                                 # kwds input
-        od = OrderedDict()
-        od.update(pairs)
-        self.assertEqual(list(od.items()), pairs)                                   # pairs input
-        od = OrderedDict()
-        od.update([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5)
-        self.assertEqual(list(od.items()), pairs)                                   # mixed input
-
-        # Issue 9137: Named argument called 'other' or 'self'
-        # shouldn't be treated specially.
-        od = OrderedDict()
-        od.update(self=23)
-        self.assertEqual(list(od.items()), [('self', 23)])
-        od = OrderedDict()
-        od.update(other={})
-        self.assertEqual(list(od.items()), [('other', {})])
-        od = OrderedDict()
-        od.update(red=5, blue=6, other=7, self=8)
-        self.assertEqual(sorted(list(od.items())),
-                         [('blue', 6), ('other', 7), ('red', 5), ('self', 8)])
-
-        # Make sure that direct calls to update do not clear previous contents
-        # add that updates items are not moved to the end
-        d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
-        d.update([('e', 5), ('f', 6)], g=7, d=4)
-        self.assertEqual(list(d.items()),
-            [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
-
-        self.assertRaises(TypeError, OrderedDict().update, 42)
-        self.assertRaises(TypeError, OrderedDict().update, (), ())
-        self.assertRaises(TypeError, OrderedDict.update)
-
-    def test_abc(self):
-        self.assertIsInstance(OrderedDict(), MutableMapping)
-        self.assertTrue(issubclass(OrderedDict, MutableMapping))
-
-    def test_clear(self):
-        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
-        shuffle(pairs)
-        od = OrderedDict(pairs)
-        self.assertEqual(len(od), len(pairs))
-        od.clear()
-        self.assertEqual(len(od), 0)
-
-    def test_delitem(self):
-        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
-        od = OrderedDict(pairs)
-        del od['a']
-        self.assertNotIn('a', od)
-        with self.assertRaises(KeyError):
-            del od['a']
-        self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])
-
-    def test_setitem(self):
-        od = OrderedDict([('d', 1), ('b', 2), ('c', 3), ('a', 4), ('e', 5)])
-        od['c'] = 10           # existing element
-        od['f'] = 20           # new element
-        self.assertEqual(list(od.items()),
-                         [('d', 1), ('b', 2), ('c', 10), ('a', 4), ('e', 5), ('f', 20)])
-
-    def test_iterators(self):
-        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
-        shuffle(pairs)
-        od = OrderedDict(pairs)
-        self.assertEqual(list(od), [t[0] for t in pairs])
-        self.assertEqual(od.keys()[:], [t[0] for t in pairs])
-        self.assertEqual(od.values()[:], [t[1] for t in pairs])
-        self.assertEqual(od.items()[:], pairs)
-        self.assertEqual(list(od.iterkeys()), [t[0] for t in pairs])
-        self.assertEqual(list(od.itervalues()), [t[1] for t in pairs])
-        self.assertEqual(list(od.iteritems()), pairs)
-        self.assertEqual(list(reversed(od)),
-                         [t[0] for t in reversed(pairs)])
-
-    def test_popitem(self):
-        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
-        shuffle(pairs)
-        od = OrderedDict(pairs)
-        while pairs:
-            self.assertEqual(od.popitem(), pairs.pop())
-        with self.assertRaises(KeyError):
-            od.popitem()
-        self.assertEqual(len(od), 0)
-
-    def test_pop(self):
-        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
-        shuffle(pairs)
-        od = OrderedDict(pairs)
-        shuffle(pairs)
-        while pairs:
-            k, v = pairs.pop()
-            self.assertEqual(od.pop(k), v)
-        with self.assertRaises(KeyError):
-            od.pop('xyz')
-        self.assertEqual(len(od), 0)
-        self.assertEqual(od.pop(k, 12345), 12345)
-
-        # make sure pop still works when __missing__ is defined
-        class Missing(OrderedDict):
-            def __missing__(self, key):
-                return 0
-        m = Missing(a=1)
-        self.assertEqual(m.pop('b', 5), 5)
-        self.assertEqual(m.pop('a', 6), 1)
-        self.assertEqual(m.pop('a', 6), 6)
-        with self.assertRaises(KeyError):
-            m.pop('a')
-
-    def test_equality(self):
-        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
-        shuffle(pairs)
-        od1 = OrderedDict(pairs)
-        od2 = OrderedDict(pairs)
-        self.assertEqual(od1, od2)          # same order implies equality
-        pairs = pairs[2:] + pairs[:2]
-        od2 = OrderedDict(pairs)
-        self.assertNotEqual(od1, od2)       # different order implies inequality
-        # comparison to regular dict is not order sensitive
-        self.assertEqual(od1, dict(od2))
-        self.assertEqual(dict(od2), od1)
-        # different length implied inequality
-        self.assertNotEqual(od1, OrderedDict(pairs[:-1]))
-
-    def test_copying(self):
-        # Check that ordered dicts are copyable, deepcopyable, picklable,
-        # and have a repr/eval round-trip
-        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
-        od = OrderedDict(pairs)
-        update_test = OrderedDict()
-        update_test.update(od)
-        for i, dup in enumerate([
-                    od.copy(),
-                    copy.copy(od),
-                    copy.deepcopy(od),
-                    pickle.loads(pickle.dumps(od, 0)),
-                    pickle.loads(pickle.dumps(od, 1)),
-                    pickle.loads(pickle.dumps(od, 2)),
-                    pickle.loads(pickle.dumps(od, -1)),
-                    eval(repr(od)),
-                    update_test,
-                    OrderedDict(od),
-                    ]):
-            self.assertTrue(dup is not od)
-            self.assertEqual(dup, od)
-            self.assertEqual(list(dup.items()), list(od.items()))
-            self.assertEqual(len(dup), len(od))
-            self.assertEqual(type(dup), type(od))
-
-    def test_yaml_linkage(self):
-        # Verify that __reduce__ is setup in a way that supports PyYAML's dump() feature.
-        # In yaml, lists are native but tuples are not.
-        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
-        od = OrderedDict(pairs)
-        # yaml.dump(od) -->
-        # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n  - [b, 2]\n'
-        self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1]))
-
-    def test_reduce_not_too_fat(self):
-        # do not save instance dictionary if not needed
-        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
-        od = OrderedDict(pairs)
-        self.assertEqual(len(od.__reduce__()), 2)
-        od.x = 10
-        self.assertEqual(len(od.__reduce__()), 3)
-
-    def test_repr(self):
-        od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])
-        self.assertEqual(repr(od),
-            "OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])")
-        self.assertEqual(eval(repr(od)), od)
-        self.assertEqual(repr(OrderedDict()), "OrderedDict()")
-
-    def test_repr_recursive(self):
-        # See issue #9826
-        od = OrderedDict.fromkeys('abc')
-        od['x'] = od
-        self.assertEqual(repr(od),
-            "OrderedDict([('a', None), ('b', None), ('c', None), ('x', ...)])")
-
-    def test_setdefault(self):
-        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
-        shuffle(pairs)
-        od = OrderedDict(pairs)
-        pair_order = list(od.items())
-        self.assertEqual(od.setdefault('a', 10), 3)
-        # make sure order didn't change
-        self.assertEqual(list(od.items()), pair_order)
-        self.assertEqual(od.setdefault('x', 10), 10)
-        # make sure 'x' is added to the end
-        self.assertEqual(list(od.items())[-1], ('x', 10))
-
-        # make sure setdefault still works when __missing__ is defined
-        class Missing(OrderedDict):
-            def __missing__(self, key):
-                return 0
-        self.assertEqual(Missing().setdefault(5, 9), 9)
-
-    def test_reinsert(self):
-        # Given insert a, insert b, delete a, re-insert a,
-        # verify that a is now later than b.
-        od = OrderedDict()
-        od['a'] = 1
-        od['b'] = 2
-        del od['a']
-        od['a'] = 1
-        self.assertEqual(list(od.items()), [('b', 2), ('a', 1)])
-
-    def test_views(self):
-        s = 'the quick brown fox jumped over a lazy dog yesterday before dawn'.split()
-        od = OrderedDict.fromkeys(s)
-        self.assertEqual(list(od.viewkeys()),  s)
-        self.assertEqual(list(od.viewvalues()),  [None for k in s])
-        self.assertEqual(list(od.viewitems()),  [(k, None) for k in s])
-
-    def test_override_update(self):
-        # Verify that subclasses can override update() without breaking __init__()
-        class MyOD(OrderedDict):
-            def update(self, *args, **kwds):
-                raise Exception()
-        items = [('a', 1), ('c', 3), ('b', 2)]
-        self.assertEqual(list(MyOD(items).items()), items)
-
-class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
-    type2test = OrderedDict
-
-    def test_popitem(self):
-        d = self._empty_mapping()
-        self.assertRaises(KeyError, d.popitem)
-
-class MyOrderedDict(OrderedDict):
-    pass
-
-class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
-    type2test = MyOrderedDict
-
-    def test_popitem(self):
-        d = self._empty_mapping()
-        self.assertRaises(KeyError, d.popitem)
-
-import collections
 
 def test_main(verbose=None):
     NamedTupleDocs = doctest.DocTestSuite(module=collections)
     test_classes = [TestNamedTuple, NamedTupleDocs, TestOneTrickPonyABCs,
-                    TestCollectionABCs, TestCounter,
-                    TestOrderedDict, GeneralMappingTests, SubclassMappingTests]
+                    TestCollectionABCs, TestCounter]
     test_support.run_unittest(*test_classes)
     test_support.run_doctest(collections, verbose)
 
diff --git a/lib/python2.7/test/test_compile.py b/lib/python2.7/test/test_compile.py
index cfc6389..e954a0c 100644
--- a/lib/python2.7/test/test_compile.py
+++ b/lib/python2.7/test/test_compile.py
@@ -3,6 +3,9 @@
 import sys
 import _ast
 from test import test_support
+from test import script_helper
+import os
+import tempfile
 import textwrap
 
 class TestSpecifics(unittest.TestCase):
@@ -555,6 +558,33 @@
         ast.body = [_ast.BoolOp()]
         self.assertRaises(TypeError, compile, ast, '<ast>', 'exec')
 
+    def test_yet_more_evil_still_undecodable(self):
+        # Issue #25388
+        src = b"#\x00\n#\xfd\n"
+        tmpd = tempfile.mkdtemp()
+        try:
+            fn = os.path.join(tmpd, "bad.py")
+            with open(fn, "wb") as fp:
+                fp.write(src)
+            rc, out, err = script_helper.assert_python_failure(fn)
+        finally:
+            test_support.rmtree(tmpd)
+        self.assertIn(b"Non-ASCII", err)
+
+    def test_null_terminated(self):
+        # The source code is null-terminated internally, but bytes-like
+        # objects are accepted, which may not be null-terminated.
+        with self.assertRaisesRegexp(TypeError, "without null bytes"):
+            compile(u"123\x00", "<dummy>", "eval")
+        with test_support.check_py3k_warnings():
+            with self.assertRaisesRegexp(TypeError, "without null bytes"):
+                compile(buffer("123\x00"), "<dummy>", "eval")
+            code = compile(buffer("123\x00", 1, 2), "<dummy>", "eval")
+            self.assertEqual(eval(code), 23)
+            code = compile(buffer("1234", 1, 2), "<dummy>", "eval")
+            self.assertEqual(eval(code), 23)
+            code = compile(buffer("$23$", 1, 2), "<dummy>", "eval")
+            self.assertEqual(eval(code), 23)
 
 class TestStackSize(unittest.TestCase):
     # These tests check that the computed stack size for a code object
@@ -593,6 +623,65 @@
         code += "   x and x\n" * self.N
         self.check_stack_size(code)
 
+    def check_constant(self, func, expected):
+        for const in func.__code__.co_consts:
+            if repr(const) == repr(expected):
+                break
+        else:
+            self.fail("unable to find constant %r in %r"
+                      % (expected, func.__code__.co_consts))
+
+    # Merging equal constants is not a strict requirement for the Python
+    # semantics; it's more of an implementation detail.
+    @test_support.cpython_only
+    def test_merge_constants(self):
+        # Issue #25843: compile() must merge constants which are equal
+        # and have the same type.
+
+        def check_same_constant(const):
+            ns = {}
+            code = "f1, f2 = lambda: %r, lambda: %r" % (const, const)
+            exec(code, ns)
+            f1 = ns['f1']
+            f2 = ns['f2']
+            self.assertIs(f1.__code__, f2.__code__)
+            self.check_constant(f1, const)
+            self.assertEqual(repr(f1()), repr(const))
+
+        check_same_constant(None)
+        check_same_constant(0)
+        check_same_constant(0.0)
+        check_same_constant(b'abc')
+        check_same_constant('abc')
+
+    def test_dont_merge_constants(self):
+        # Issue #25843: compile() must not merge constants which are equal
+        # but have a different type.
+
+        def check_different_constants(const1, const2):
+            ns = {}
+            exec("f1, f2 = lambda: %r, lambda: %r" % (const1, const2), ns)
+            f1 = ns['f1']
+            f2 = ns['f2']
+            self.assertIsNot(f1.__code__, f2.__code__)
+            self.check_constant(f1, const1)
+            self.check_constant(f2, const2)
+            self.assertEqual(repr(f1()), repr(const1))
+            self.assertEqual(repr(f2()), repr(const2))
+
+        check_different_constants(0, 0.0)
+        check_different_constants(+0.0, -0.0)
+        check_different_constants((0,), (0.0,))
+
+        # check_different_constants() cannot be used because repr(-0j) is
+        # '(-0-0j)', but evaluating '(-0-0j)' gives 0j: we lose the sign.
+        f1, f2 = lambda: +0.0j, lambda: -0.0j
+        self.assertIsNot(f1.__code__, f2.__code__)
+        self.check_constant(f1, +0.0j)
+        self.check_constant(f2, -0.0j)
+        self.assertEqual(repr(f1()), repr(+0.0j))
+        self.assertEqual(repr(f2()), repr(-0.0j))
+
 
 def test_main():
     test_support.run_unittest(__name__)
diff --git a/lib/python2.7/test/test_contextlib.py b/lib/python2.7/test/test_contextlib.py
index f28c95e..301564b 100644
--- a/lib/python2.7/test/test_contextlib.py
+++ b/lib/python2.7/test/test_contextlib.py
@@ -106,6 +106,14 @@
         baz = self._create_contextmanager_attribs()
         self.assertEqual(baz.__doc__, "Whee!")
 
+    def test_keywords(self):
+        # Ensure no keyword arguments are inhibited
+        @contextmanager
+        def woohoo(self, func, args, kwds):
+            yield (self, func, args, kwds)
+        with woohoo(self=11, func=22, args=33, kwds=44) as target:
+            self.assertEqual(target, (11, 22, 33, 44))
+
 class NestedTestCase(unittest.TestCase):
 
     # XXX This needs more work
diff --git a/lib/python2.7/test/test_copy.py b/lib/python2.7/test/test_copy.py
index 6b64f10..d65f6a2 100644
--- a/lib/python2.7/test/test_copy.py
+++ b/lib/python2.7/test/test_copy.py
@@ -82,7 +82,8 @@
             pass
         def f():
             pass
-        tests = [None, 42, 2L**100, 3.14, True, False, 1j,
+        tests = [None, Ellipsis,
+                 42, 2L**100, 3.14, True, False, 1j,
                  "hello", u"hello\u1234", f.func_code,
                  NewStyle, xrange(10), Classic, max]
         for x in tests:
@@ -90,15 +91,57 @@
 
     def test_copy_list(self):
         x = [1, 2, 3]
-        self.assertEqual(copy.copy(x), x)
+        y = copy.copy(x)
+        self.assertEqual(y, x)
+        self.assertIsNot(y, x)
+        x = []
+        y = copy.copy(x)
+        self.assertEqual(y, x)
+        self.assertIsNot(y, x)
 
     def test_copy_tuple(self):
         x = (1, 2, 3)
-        self.assertEqual(copy.copy(x), x)
+        self.assertIs(copy.copy(x), x)
+        x = ()
+        self.assertIs(copy.copy(x), x)
+        x = (1, 2, 3, [])
+        self.assertIs(copy.copy(x), x)
 
     def test_copy_dict(self):
         x = {"foo": 1, "bar": 2}
-        self.assertEqual(copy.copy(x), x)
+        y = copy.copy(x)
+        self.assertEqual(y, x)
+        self.assertIsNot(y, x)
+        x = {}
+        y = copy.copy(x)
+        self.assertEqual(y, x)
+        self.assertIsNot(y, x)
+
+    def test_copy_set(self):
+        x = {1, 2, 3}
+        y = copy.copy(x)
+        self.assertEqual(y, x)
+        self.assertIsNot(y, x)
+        x = set()
+        y = copy.copy(x)
+        self.assertEqual(y, x)
+        self.assertIsNot(y, x)
+
+    def test_copy_frozenset(self):
+        x = frozenset({1, 2, 3})
+        self.assertIs(copy.copy(x), x)
+        x = frozenset()
+        self.assertIs(copy.copy(x), x)
+
+    def test_copy_bytearray(self):
+        x = bytearray(b'abc')
+        y = copy.copy(x)
+        self.assertEqual(y, x)
+        self.assertIsNot(y, x)
+        x = bytearray()
+        y = copy.copy(x)
+        self.assertEqual(y, x)
+        self.assertIsNot(y, x)
 
     def test_copy_inst_vanilla(self):
         class C:
@@ -165,6 +208,9 @@
                 return cmp(self.foo, other.foo)
         x = C(42)
         self.assertEqual(copy.copy(x), x)
+        # State whose boolean value is false (issue #25718)
+        x = C(0.0)
+        self.assertEqual(copy.copy(x), x)
 
     # The deepcopy() method
 
@@ -395,6 +441,12 @@
         x = C([42])
         y = copy.deepcopy(x)
         self.assertEqual(y, x)
+        self.assertIsNot(y, x)
+        self.assertIsNot(y.foo, x.foo)
+        # State whose boolean value is false (issue #25718)
+        x = C([])
+        y = copy.deepcopy(x)
+        self.assertEqual(y, x)
         self.assertTrue(y is not x)
         self.assertTrue(y.foo is not x.foo)
 
diff --git a/lib/python2.7/test/test_cpickle.py b/lib/python2.7/test/test_cpickle.py
index 3bc700b..c9ec788 100644
--- a/lib/python2.7/test/test_cpickle.py
+++ b/lib/python2.7/test/test_cpickle.py
@@ -1,8 +1,10 @@
 import cPickle
 import cStringIO
 import io
+import functools
 import unittest
-from test.pickletester import (AbstractPickleTests,
+from test.pickletester import (AbstractUnpickleTests,
+                               AbstractPickleTests,
                                AbstractPickleModuleTests,
                                AbstractPicklerUnpicklerObjectTests,
                                BigmemPickleTests)
@@ -40,7 +42,8 @@
         test_support.unlink(test_support.TESTFN)
 
 
-class cPickleTests(AbstractPickleTests, AbstractPickleModuleTests):
+class cPickleTests(AbstractUnpickleTests, AbstractPickleTests,
+                   AbstractPickleModuleTests):
 
     def setUp(self):
         self.dumps = cPickle.dumps
@@ -48,6 +51,36 @@
 
     error = cPickle.BadPickleGet
     module = cPickle
+    bad_stack_errors = (cPickle.UnpicklingError,)
+    bad_mark_errors = (EOFError,)
+    truncated_errors = (cPickle.UnpicklingError, EOFError,
+                        AttributeError, ValueError)
+
+class cPickleUnpicklerTests(AbstractUnpickleTests):
+
+    def loads(self, buf):
+        f = self.input(buf)
+        try:
+            p = cPickle.Unpickler(f)
+            return p.load()
+        finally:
+            self.close(f)
+
+    error = cPickle.BadPickleGet
+    bad_stack_errors = (cPickle.UnpicklingError,)
+    bad_mark_errors = (EOFError,)
+    truncated_errors = (cPickle.UnpicklingError, EOFError,
+                        AttributeError, ValueError)
+
+class cStringIOCUnpicklerTests(cStringIOMixin, cPickleUnpicklerTests):
+    pass
+
+class BytesIOCUnpicklerTests(BytesIOMixin, cPickleUnpicklerTests):
+    pass
+
+class FileIOCUnpicklerTests(FileIOMixin, cPickleUnpicklerTests):
+    pass
+
 
 class cPicklePicklerTests(AbstractPickleTests):
 
@@ -69,8 +102,6 @@
         finally:
             self.close(f)
 
-    error = cPickle.BadPickleGet
-
 class cStringIOCPicklerTests(cStringIOMixin, cPicklePicklerTests):
     pass
 
@@ -129,33 +160,6 @@
         finally:
             self.close(f)
 
-    error = cPickle.BadPickleGet
-
-    def test_recursive_list(self):
-        self.assertRaises(ValueError,
-                          AbstractPickleTests.test_recursive_list,
-                          self)
-
-    def test_recursive_tuple(self):
-        self.assertRaises(ValueError,
-                          AbstractPickleTests.test_recursive_tuple,
-                          self)
-
-    def test_recursive_inst(self):
-        self.assertRaises(ValueError,
-                          AbstractPickleTests.test_recursive_inst,
-                          self)
-
-    def test_recursive_dict(self):
-        self.assertRaises(ValueError,
-                          AbstractPickleTests.test_recursive_dict,
-                          self)
-
-    def test_recursive_multi(self):
-        self.assertRaises(ValueError,
-                          AbstractPickleTests.test_recursive_multi,
-                          self)
-
     def test_nonrecursive_deep(self):
         # If it's not cyclic, it should pickle OK even if the nesting
         # depth exceeds PY_CPICKLE_FAST_LIMIT.  That happens to be
@@ -167,6 +171,19 @@
         b = self.loads(self.dumps(a))
         self.assertEqual(a, b)
 
+for name in dir(AbstractPickleTests):
+    if name.startswith('test_recursive_'):
+        func = getattr(AbstractPickleTests, name)
+        if '_subclass' in name and '_and_inst' not in name:
+            assert_args = RuntimeError, 'maximum recursion depth exceeded'
+        else:
+            assert_args = ValueError, "can't pickle cyclic objects"
+        def wrapper(self, func=func, assert_args=assert_args):
+            with self.assertRaisesRegexp(*assert_args):
+                func(self)
+        functools.update_wrapper(wrapper, func)
+        setattr(cPickleFastPicklerTests, name, wrapper)
+
 class cStringIOCPicklerFastTests(cStringIOMixin, cPickleFastPicklerTests):
     pass
 
@@ -219,6 +236,9 @@
 def test_main():
     test_support.run_unittest(
         cPickleTests,
+        cStringIOCUnpicklerTests,
+        BytesIOCUnpicklerTests,
+        FileIOCUnpicklerTests,
         cStringIOCPicklerTests,
         BytesIOCPicklerTests,
         FileIOCPicklerTests,
diff --git a/lib/python2.7/test/test_csv.py b/lib/python2.7/test/test_csv.py
index e2eec70..d456759 100644
--- a/lib/python2.7/test/test_csv.py
+++ b/lib/python2.7/test/test_csv.py
@@ -2,6 +2,7 @@
 # Copyright (C) 2001,2002 Python Software Foundation
 # csv package unit tests
 
+import copy
 import sys
 import os
 import unittest
@@ -10,6 +11,7 @@
 import csv
 import gc
 import io
+import pickle
 from test import test_support
 
 class Test_Csv(unittest.TestCase):
@@ -466,6 +468,18 @@
         self.assertRaises(TypeError, csv.reader, [], quoting = -1)
         self.assertRaises(TypeError, csv.reader, [], quoting = 100)
 
+    # See issue #22995
+    ## def test_copy(self):
+    ##     for name in csv.list_dialects():
+    ##         dialect = csv.get_dialect(name)
+    ##         self.assertRaises(TypeError, copy.copy, dialect)
+
+    ## def test_pickle(self):
+    ##     for name in csv.list_dialects():
+    ##         dialect = csv.get_dialect(name)
+    ##         for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+    ##             self.assertRaises(TypeError, pickle.dumps, dialect, proto)
+
 class TestCsvBase(unittest.TestCase):
     def readerAssertEqual(self, input, expected_result):
         fd, name = tempfile.mkstemp()
diff --git a/lib/python2.7/test/test_decimal.py b/lib/python2.7/test/test_decimal.py
index 3b3d9d1..14b7f42 100644
--- a/lib/python2.7/test/test_decimal.py
+++ b/lib/python2.7/test/test_decimal.py
@@ -16,7 +16,7 @@
 
 Cowlishaw's tests can be downloaded from:
 
-   www2.hursley.ibm.com/decimal/dectest.zip
+   http://speleotrove.com/decimal/dectest.zip
 
 This test module can be called from command line with one parameter (Arithmetic
 or Behaviour) to test each part, or without parameter to test both parts. If
diff --git a/lib/python2.7/test/test_descr.py b/lib/python2.7/test/test_descr.py
index 0ac458f..b17bdbf 100644
--- a/lib/python2.7/test/test_descr.py
+++ b/lib/python2.7/test/test_descr.py
@@ -4065,6 +4065,37 @@
         else:
             assert 0, "best_base calculation found wanting"
 
+    def test_unsubclassable_types(self):
+        with self.assertRaises(TypeError):
+            class X(types.NoneType):
+                pass
+        with self.assertRaises(TypeError):
+            class X(object, types.NoneType):
+                pass
+        with self.assertRaises(TypeError):
+            class X(types.NoneType, object):
+                pass
+        class O(object):
+            pass
+        with self.assertRaises(TypeError):
+            class X(O, types.NoneType):
+                pass
+        with self.assertRaises(TypeError):
+            class X(types.NoneType, O):
+                pass
+
+        class X(object):
+            pass
+        with self.assertRaises(TypeError):
+            X.__bases__ = types.NoneType,
+        with self.assertRaises(TypeError):
+            X.__bases__ = object, types.NoneType
+        with self.assertRaises(TypeError):
+            X.__bases__ = types.NoneType, object
+        with self.assertRaises(TypeError):
+            X.__bases__ = O, types.NoneType
+        with self.assertRaises(TypeError):
+            X.__bases__ = types.NoneType, O
 
     def test_mutable_bases_with_failing_mro(self):
         # Testing mutable bases with failing mro...
@@ -4668,6 +4699,7 @@
         for o in gc.get_objects():
             self.assertIsNot(type(o), X)
 
+
 class DictProxyTests(unittest.TestCase):
     def setUp(self):
         class C(object):
@@ -4732,6 +4764,26 @@
         type.mro(tuple)
 
 
+class PicklingTests(unittest.TestCase):
+
+    def test_issue24097(self):
+        # Slot name is freed inside __getattr__ and is later used.
+        class S(str):  # Not interned
+            pass
+        class A(object):
+            __slotnames__ = [S('spam')]
+            def __getattr__(self, attr):
+                if attr == 'spam':
+                    A.__slotnames__[:] = [S('spam')]
+                    return 42
+                else:
+                    raise AttributeError
+
+        import copy_reg
+        expected = (copy_reg.__newobj__, (A,), ({}, {'spam': 42}), None, None)
+        self.assertEqual(A().__reduce__(2), expected)
+
+
 def test_main():
     deprecations = [(r'complex divmod\(\), // and % are deprecated$',
                      DeprecationWarning)]
@@ -4743,7 +4795,8 @@
     with test_support.check_warnings(*deprecations):
         # Run all local test cases, with PTypesLongInitTest first.
         test_support.run_unittest(PTypesLongInitTest, OperatorsTest,
-                                  ClassPropertiesAndMethods, DictProxyTests)
+                                  ClassPropertiesAndMethods, DictProxyTests,
+                                  PicklingTests)
 
 if __name__ == "__main__":
     test_main()
diff --git a/lib/python2.7/test/test_dict.py b/lib/python2.7/test/test_dict.py
index a5685b9..1c63fc0 100644
--- a/lib/python2.7/test/test_dict.py
+++ b/lib/python2.7/test/test_dict.py
@@ -448,7 +448,7 @@
         # (D) subclass defines __missing__ method returning a value
         # (E) subclass defines __missing__ method raising RuntimeError
         # (F) subclass sets __missing__ instance variable (no effect)
-        # (G) subclass doesn't define __missing__ at a all
+        # (G) subclass doesn't define __missing__ at all
         class D(dict):
             def __missing__(self, key):
                 return 42
diff --git a/lib/python2.7/test/test_dictviews.py b/lib/python2.7/test/test_dictviews.py
index 30cfb93..b585bdd 100644
--- a/lib/python2.7/test/test_dictviews.py
+++ b/lib/python2.7/test/test_dictviews.py
@@ -1,4 +1,7 @@
+import copy
+import pickle
 import unittest
+import collections
 from test import test_support
 
 class DictSetTest(unittest.TestCase):
@@ -95,6 +98,7 @@
         self.assertEqual(d1.viewkeys() & set(d1.viewkeys()), {'a', 'b'})
         self.assertEqual(d1.viewkeys() & set(d2.viewkeys()), {'b'})
         self.assertEqual(d1.viewkeys() & set(d3.viewkeys()), set())
+        self.assertEqual(d1.viewkeys() & tuple(d1.viewkeys()), {'a', 'b'})
 
         self.assertEqual(d1.viewkeys() | d1.viewkeys(), {'a', 'b'})
         self.assertEqual(d1.viewkeys() | d2.viewkeys(), {'a', 'b', 'c'})
@@ -103,6 +107,7 @@
         self.assertEqual(d1.viewkeys() | set(d2.viewkeys()), {'a', 'b', 'c'})
         self.assertEqual(d1.viewkeys() | set(d3.viewkeys()),
                          {'a', 'b', 'd', 'e'})
+        self.assertEqual(d1.viewkeys() | (1, 2), {'a', 'b', 1, 2})
 
         self.assertEqual(d1.viewkeys() ^ d1.viewkeys(), set())
         self.assertEqual(d1.viewkeys() ^ d2.viewkeys(), {'a', 'c'})
@@ -111,6 +116,7 @@
         self.assertEqual(d1.viewkeys() ^ set(d2.viewkeys()), {'a', 'c'})
         self.assertEqual(d1.viewkeys() ^ set(d3.viewkeys()),
                          {'a', 'b', 'd', 'e'})
+        self.assertEqual(d1.viewkeys() ^ tuple(d2.keys()), {'a', 'c'})
 
         self.assertEqual(d1.viewkeys() - d1.viewkeys(), set())
         self.assertEqual(d1.viewkeys() - d2.viewkeys(), {'a'})
@@ -118,6 +124,7 @@
         self.assertEqual(d1.viewkeys() - set(d1.viewkeys()), set())
         self.assertEqual(d1.viewkeys() - set(d2.viewkeys()), {'a'})
         self.assertEqual(d1.viewkeys() - set(d3.viewkeys()), {'a', 'b'})
+        self.assertEqual(d1.viewkeys() - (0, 1), {'a', 'b'})
 
     def test_items_set_operations(self):
         d1 = {'a': 1, 'b': 2}
@@ -164,7 +171,42 @@
         d[42] = d.viewvalues()
         self.assertRaises(RuntimeError, repr, d)
 
+    def test_abc_registry(self):
+        d = dict(a=1)
 
+        self.assertIsInstance(d.viewkeys(), collections.KeysView)
+        self.assertIsInstance(d.viewkeys(), collections.MappingView)
+        self.assertIsInstance(d.viewkeys(), collections.Set)
+        self.assertIsInstance(d.viewkeys(), collections.Sized)
+        self.assertIsInstance(d.viewkeys(), collections.Iterable)
+        self.assertIsInstance(d.viewkeys(), collections.Container)
+
+        self.assertIsInstance(d.viewvalues(), collections.ValuesView)
+        self.assertIsInstance(d.viewvalues(), collections.MappingView)
+        self.assertIsInstance(d.viewvalues(), collections.Sized)
+
+        self.assertIsInstance(d.viewitems(), collections.ItemsView)
+        self.assertIsInstance(d.viewitems(), collections.MappingView)
+        self.assertIsInstance(d.viewitems(), collections.Set)
+        self.assertIsInstance(d.viewitems(), collections.Sized)
+        self.assertIsInstance(d.viewitems(), collections.Iterable)
+        self.assertIsInstance(d.viewitems(), collections.Container)
+
+    def test_copy(self):
+        d = {1: 10, "a": "ABC"}
+        self.assertRaises(TypeError, copy.copy, d.viewkeys())
+        self.assertRaises(TypeError, copy.copy, d.viewvalues())
+        self.assertRaises(TypeError, copy.copy, d.viewitems())
+
+    def test_pickle(self):
+        d = {1: 10, "a": "ABC"}
+        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+            self.assertRaises((TypeError, pickle.PicklingError),
+                pickle.dumps, d.viewkeys(), proto)
+            self.assertRaises((TypeError, pickle.PicklingError),
+                pickle.dumps, d.viewvalues(), proto)
+            self.assertRaises((TypeError, pickle.PicklingError),
+                pickle.dumps, d.viewitems(), proto)
 
 
 def test_main():
diff --git a/lib/python2.7/test/test_doctest.py b/lib/python2.7/test/test_doctest.py
index c1e4c13..4a341ed 100644
--- a/lib/python2.7/test/test_doctest.py
+++ b/lib/python2.7/test/test_doctest.py
@@ -2582,7 +2582,7 @@
     >>> fn = tempfile.mktemp()
     >>> with open(fn, 'wb') as f:
     ...     f.write('Test:\r\n\r\n  >>> x = 1 + 1\r\n\r\nDone.\r\n')
-    >>> doctest.testfile(fn, False)
+    >>> doctest.testfile(fn, module_relative=False, verbose=False)
     TestResults(failed=0, attempted=1)
     >>> os.remove(fn)
 
@@ -2591,7 +2591,7 @@
     >>> fn = tempfile.mktemp()
     >>> with open(fn, 'wb') as f:
     ...     f.write('Test:\n\n  >>> x = 1 + 1\n\nDone.\n')
-    >>> doctest.testfile(fn, False)
+    >>> doctest.testfile(fn, module_relative=False, verbose=False)
     TestResults(failed=0, attempted=1)
     >>> os.remove(fn)
 
diff --git a/lib/python2.7/test/test_dummy_thread.py b/lib/python2.7/test/test_dummy_thread.py
index d9bdd3c..29a8531 100644
--- a/lib/python2.7/test/test_dummy_thread.py
+++ b/lib/python2.7/test/test_dummy_thread.py
@@ -24,14 +24,14 @@
 
     def test_initlock(self):
         #Make sure locks start locked
-        self.assertTrue(not self.lock.locked(),
+        self.assertFalse(self.lock.locked(),
                         "Lock object is not initialized unlocked.")
 
     def test_release(self):
         # Test self.lock.release()
         self.lock.acquire()
         self.lock.release()
-        self.assertTrue(not self.lock.locked(),
+        self.assertFalse(self.lock.locked(),
                         "Lock object did not release properly.")
 
     def test_improper_release(self):
@@ -46,7 +46,7 @@
     def test_cond_acquire_fail(self):
         #Test acquiring locked lock returns False
         self.lock.acquire(0)
-        self.assertTrue(not self.lock.acquire(0),
+        self.assertFalse(self.lock.acquire(0),
                         "Conditional acquiring of a locked lock incorrectly "
                          "succeeded.")
 
@@ -58,9 +58,9 @@
 
     def test_uncond_acquire_return_val(self):
         #Make sure that an unconditional locking returns True.
-        self.assertTrue(self.lock.acquire(1) is True,
+        self.assertIs(self.lock.acquire(1), True,
                         "Unconditional locking did not return True.")
-        self.assertTrue(self.lock.acquire() is True)
+        self.assertIs(self.lock.acquire(), True)
 
     def test_uncond_acquire_blocking(self):
         #Make sure that unconditional acquiring of a locked lock blocks.
@@ -80,7 +80,7 @@
         end_time = int(time.time())
         if test_support.verbose:
             print "done"
-        self.assertTrue((end_time - start_time) >= DELAY,
+        self.assertGreaterEqual(end_time - start_time, DELAY,
                         "Blocking by unconditional acquiring failed.")
 
 class MiscTests(unittest.TestCase):
@@ -94,7 +94,7 @@
         #Test sanity of _thread.get_ident()
         self.assertIsInstance(_thread.get_ident(), int,
                               "_thread.get_ident() returned a non-integer")
-        self.assertTrue(_thread.get_ident() != 0,
+        self.assertNotEqual(_thread.get_ident(), 0,
                         "_thread.get_ident() returned 0")
 
     def test_LockType(self):
@@ -164,7 +164,7 @@
         time.sleep(DELAY)
         if test_support.verbose:
             print 'done'
-        self.assertTrue(testing_queue.qsize() == thread_count,
+        self.assertEqual(testing_queue.qsize(), thread_count,
                         "Not all %s threads executed properly after %s sec." %
                         (thread_count, DELAY))
 
diff --git a/lib/python2.7/test/test_ensurepip.py b/lib/python2.7/test/test_ensurepip.py
index f671ee3..8645f05 100644
--- a/lib/python2.7/test/test_ensurepip.py
+++ b/lib/python2.7/test/test_ensurepip.py
@@ -216,7 +216,10 @@
             ensurepip._uninstall_helper()
 
         self.run_pip.assert_called_once_with(
-            ["uninstall", "-y", "pip", "setuptools"]
+            [
+                "uninstall", "-y", "--disable-pip-version-check", "pip",
+                "setuptools",
+            ]
         )
 
     @requires_usable_pip
@@ -225,7 +228,10 @@
             ensurepip._uninstall_helper(verbosity=1)
 
         self.run_pip.assert_called_once_with(
-            ["uninstall", "-y", "-v", "pip", "setuptools"]
+            [
+                "uninstall", "-y", "--disable-pip-version-check", "-v", "pip",
+                "setuptools",
+            ]
         )
 
     @requires_usable_pip
@@ -234,7 +240,10 @@
             ensurepip._uninstall_helper(verbosity=2)
 
         self.run_pip.assert_called_once_with(
-            ["uninstall", "-y", "-vv", "pip", "setuptools"]
+            [
+                "uninstall", "-y", "--disable-pip-version-check", "-vv", "pip",
+                "setuptools",
+            ]
         )
 
     @requires_usable_pip
@@ -243,7 +252,10 @@
             ensurepip._uninstall_helper(verbosity=3)
 
         self.run_pip.assert_called_once_with(
-            ["uninstall", "-y", "-vvv", "pip", "setuptools"]
+            [
+                "uninstall", "-y", "--disable-pip-version-check", "-vvv",
+                "pip", "setuptools",
+            ]
         )
 
     @requires_usable_pip
@@ -344,7 +356,10 @@
             ensurepip._uninstall._main([])
 
         self.run_pip.assert_called_once_with(
-            ["uninstall", "-y", "pip", "setuptools"]
+            [
+                "uninstall", "-y", "--disable-pip-version-check", "pip",
+                "setuptools",
+            ]
         )
 
 
diff --git a/lib/python2.7/test/test_exceptions.py b/lib/python2.7/test/test_exceptions.py
index b950031..bc8944d 100644
--- a/lib/python2.7/test/test_exceptions.py
+++ b/lib/python2.7/test/test_exceptions.py
@@ -5,10 +5,15 @@
 import unittest
 import pickle, cPickle
 
-from test.test_support import (TESTFN, unlink, run_unittest, captured_output,
+from test.test_support import (TESTFN, unlink, run_unittest, captured_stderr,
                                check_warnings, cpython_only)
 from test.test_pep352 import ignore_deprecation_warnings
 
+class BrokenStrException(Exception):
+    def __str__(self):
+        raise Exception("str() is broken")
+    __repr__ = __str__  # Python 2's PyErr_WriteUnraisable() uses repr()
+
 # XXX This is not really enough, each *operation* should be tested!
 
 class ExceptionTests(unittest.TestCase):
@@ -375,7 +380,7 @@
         # The test prints an unraisable recursion error when
         # doing "except ValueError", this is because subclass
         # checking has recursion checking too.
-        with captured_output("stderr"):
+        with captured_stderr():
             try:
                 g()
             except RuntimeError:
@@ -448,7 +453,7 @@
             __metaclass__ = Meta
             pass
 
-        with captured_output("stderr") as stderr:
+        with captured_stderr() as stderr:
             try:
                 raise KeyError()
             except MyException, e:
@@ -460,7 +465,7 @@
             else:
                 self.fail("Should have raised KeyError")
 
-        with captured_output("stderr") as stderr:
+        with captured_stderr() as stderr:
             def g():
                 try:
                     return g()
@@ -644,6 +649,62 @@
         self.assertEqual(error5.a, 1)
         self.assertEqual(error5.__doc__, "")
 
+    def test_unraisable(self):
+        # Issue #22836: PyErr_WriteUnraisable() should give sensible reports
+        class BrokenDel:
+            def __del__(self):
+                exc = ValueError("del is broken")
+                # In Python 3, the following line would be in the report:
+                raise exc
+
+        class BrokenRepr(BrokenDel):
+            def __repr__(self):
+                raise AttributeError("repr() is broken")
+
+        class BrokenExceptionDel:
+            def __del__(self):
+                exc = BrokenStrException()
+                # In Python 3, the following line would be in the report:
+                raise exc
+
+        for test_class in (BrokenDel, BrokenRepr, BrokenExceptionDel):
+            obj = test_class()
+            with captured_stderr() as stderr:
+                del obj
+            report = stderr.getvalue()
+            self.assertRegexpMatches(report, "Exception.* ignored")
+            if test_class is BrokenRepr:
+                self.assertIn("<object repr() failed>", report)
+            else:
+                self.assertIn("__del__", report)
+            if test_class is BrokenExceptionDel:
+                self.assertIn("BrokenStrException", report)
+                self.assertIn("<exception repr() failed>", report)
+            else:
+                self.assertIn("ValueError", report)
+                self.assertIn("del is broken", report)
+            self.assertTrue(report.endswith("\n"))
+
+    def test_unhandled(self):
+        # Check for sensible reporting of unhandled exceptions
+        for exc_type in (ValueError, BrokenStrException):
+            try:
+                exc = exc_type("test message")
+                # The following line is included in the traceback report:
+                raise exc
+            except exc_type:
+                with captured_stderr() as stderr:
+                    sys.__excepthook__(*sys.exc_info())
+            report = stderr.getvalue()
+            self.assertIn("test_exceptions.py", report)
+            self.assertIn("raise exc", report)
+            self.assertIn(exc_type.__name__, report)
+            if exc_type is BrokenStrException:
+                self.assertIn("<exception str() failed>", report)
+            else:
+                self.assertIn("test message", report)
+            self.assertTrue(report.endswith("\n"))
+
 
 def test_main():
     run_unittest(ExceptionTests, TestSameStrAndUnicodeMsg)
diff --git a/lib/python2.7/test/test_file.py b/lib/python2.7/test/test_file.py
index 4f2c9ef..1fd7910 100644
--- a/lib/python2.7/test/test_file.py
+++ b/lib/python2.7/test/test_file.py
@@ -88,8 +88,8 @@
     def testErrors(self):
         f = self.f
         self.assertEqual(f.name, TESTFN)
-        self.assertTrue(not f.isatty())
-        self.assertTrue(not f.closed)
+        self.assertFalse(f.isatty())
+        self.assertFalse(f.closed)
 
         if hasattr(f, "readinto"):
             self.assertRaises((IOError, TypeError), f.readinto, "")
diff --git a/lib/python2.7/test/test_fileinput.py b/lib/python2.7/test/test_fileinput.py
index c15ad84..a6f0994 100644
--- a/lib/python2.7/test/test_fileinput.py
+++ b/lib/python2.7/test/test_fileinput.py
@@ -5,7 +5,7 @@
 
 import unittest
 from test.test_support import verbose, TESTFN, run_unittest
-from test.test_support import unlink as safe_unlink
+from test.test_support import unlink as safe_unlink, check_warnings
 import sys, re
 from StringIO import StringIO
 from fileinput import FileInput, hook_encoded
@@ -28,6 +28,42 @@
     for name in names:
         safe_unlink(name)
 
+class LineReader:
+
+    def __init__(self):
+        self._linesread = []
+
+    @property
+    def linesread(self):
+        try:
+            return self._linesread[:]
+        finally:
+            self._linesread = []
+
+    def openhook(self, filename, mode):
+        self.it = iter(filename.splitlines(True))
+        return self
+
+    def readline(self, size=None):
+        line = next(self.it, '')
+        self._linesread.append(line)
+        return line
+
+    def readlines(self, hint=-1):
+        lines = []
+        size = 0
+        while True:
+            line = self.readline()
+            if not line:
+                return lines
+            lines.append(line)
+            size += len(line)
+            if size >= hint:
+                return lines
+
+    def close(self):
+        pass
+
 class BufferSizesTests(unittest.TestCase):
     def test_buffer_sizes(self):
         # First, run the tests with default and teeny buffer size.
@@ -211,10 +247,11 @@
         except ValueError:
             pass
         try:
-            t1 = writeTmp(1, ["A\nB"], mode="wb")
-            fi = FileInput(files=t1, openhook=hook_encoded("rot13"))
+            # UTF-7 is a convenient, seldom used encoding
+            t1 = writeTmp(1, ['+AEE-\n+AEI-'], mode="wb")
+            fi = FileInput(files=t1, openhook=hook_encoded("utf-7"))
             lines = list(fi)
-            self.assertEqual(lines, ["N\n", "O"])
+            self.assertEqual(lines, [u'A\n', u'B'])
         finally:
             remove_tempfiles(t1)
 
@@ -227,7 +264,7 @@
             f.write('\x80')
         self.addCleanup(safe_unlink, TESTFN)
 
-        fi = FileInput(files=TESTFN, openhook=hook_encoded('ascii'), bufsize=8)
+        fi = FileInput(files=TESTFN, openhook=hook_encoded('ascii'))
         # The most likely failure is a UnicodeDecodeError due to the entire
         # file being read when it shouldn't have been.
         self.assertEqual(fi.readline(), u'A\n')
@@ -238,6 +275,38 @@
             list(fi)
         fi.close()
 
+    def test_readline_buffering(self):
+        src = LineReader()
+        fi = FileInput(files=['line1\nline2', 'line3\n'], openhook=src.openhook)
+        self.assertEqual(src.linesread, [])
+        self.assertEqual(fi.readline(), 'line1\n')
+        self.assertEqual(src.linesread, ['line1\n'])
+        self.assertEqual(fi.readline(), 'line2')
+        self.assertEqual(src.linesread, ['line2'])
+        self.assertEqual(fi.readline(), 'line3\n')
+        self.assertEqual(src.linesread, ['', 'line3\n'])
+        self.assertEqual(fi.readline(), '')
+        self.assertEqual(src.linesread, [''])
+        self.assertEqual(fi.readline(), '')
+        self.assertEqual(src.linesread, [])
+        fi.close()
+
+    def test_iteration_buffering(self):
+        src = LineReader()
+        fi = FileInput(files=['line1\nline2', 'line3\n'], openhook=src.openhook)
+        self.assertEqual(src.linesread, [])
+        self.assertEqual(next(fi), 'line1\n')
+        self.assertEqual(src.linesread, ['line1\n'])
+        self.assertEqual(next(fi), 'line2')
+        self.assertEqual(src.linesread, ['line2'])
+        self.assertEqual(next(fi), 'line3\n')
+        self.assertEqual(src.linesread, ['', 'line3\n'])
+        self.assertRaises(StopIteration, next, fi)
+        self.assertEqual(src.linesread, [''])
+        self.assertRaises(StopIteration, next, fi)
+        self.assertEqual(src.linesread, [])
+        fi.close()
+
 class Test_hook_encoded(unittest.TestCase):
     """Unit tests for fileinput.hook_encoded()"""
 
diff --git a/lib/python2.7/test/test_fileio.py b/lib/python2.7/test/test_fileio.py
index e21e47f..8fdad14 100644
--- a/lib/python2.7/test/test_fileio.py
+++ b/lib/python2.7/test/test_fileio.py
@@ -112,15 +112,15 @@
 
     def testErrors(self):
         f = self.f
-        self.assertTrue(not f.isatty())
-        self.assertTrue(not f.closed)
+        self.assertFalse(f.isatty())
+        self.assertFalse(f.closed)
         #self.assertEqual(f.name, TESTFN)
         self.assertRaises(ValueError, f.read, 10) # Open for reading
         f.close()
         self.assertTrue(f.closed)
         f = _FileIO(TESTFN, 'r')
         self.assertRaises(TypeError, f.readinto, "")
-        self.assertTrue(not f.closed)
+        self.assertFalse(f.closed)
         f.close()
         self.assertTrue(f.closed)
 
diff --git a/lib/python2.7/test/test_float.py b/lib/python2.7/test/test_float.py
index 5bf1d31..c917c1e 100644
--- a/lib/python2.7/test/test_float.py
+++ b/lib/python2.7/test/test_float.py
@@ -27,6 +27,12 @@
 test_dir = os.path.dirname(__file__) or os.curdir
 format_testfile = os.path.join(test_dir, 'formatfloat_testcases.txt')
 
+class FloatSubclass(float):
+    pass
+
+class OtherFloatSubclass(float):
+    pass
+
 class GeneralFloatCases(unittest.TestCase):
 
     def test_float(self):
@@ -53,6 +59,39 @@
         float('.' + '1'*1000)
         float(unicode('.' + '1'*1000))
 
+    def test_non_numeric_input_types(self):
+        # Test possible non-numeric types for the argument x, including
+        # subclasses of the explicitly documented accepted types.
+        class CustomStr(str): pass
+        class CustomByteArray(bytearray): pass
+        factories = [str, bytearray, CustomStr, CustomByteArray, buffer]
+
+        if test_support.have_unicode:
+            class CustomUnicode(unicode): pass
+            factories += [unicode, CustomUnicode]
+
+        for f in factories:
+            with test_support.check_py3k_warnings(quiet=True):
+                x = f(" 3.14  ")
+            msg = 'x has value %s and type %s' % (x, type(x).__name__)
+            try:
+                self.assertEqual(float(x), 3.14, msg=msg)
+            except TypeError, err:
+                raise AssertionError('For %s got TypeError: %s' %
+                                     (type(x).__name__, err))
+            errmsg = "could not convert"
+            with self.assertRaisesRegexp(ValueError, errmsg, msg=msg), \
+                 test_support.check_py3k_warnings(quiet=True):
+                float(f('A' * 0x10))
+
+    def test_float_buffer(self):
+        with test_support.check_py3k_warnings():
+            self.assertEqual(float(buffer('12.3', 1, 3)), 2.3)
+            self.assertEqual(float(buffer('12.3\x00', 1, 3)), 2.3)
+            self.assertEqual(float(buffer('12.3 ', 1, 3)), 2.3)
+            self.assertEqual(float(buffer('12.3A', 1, 3)), 2.3)
+            self.assertEqual(float(buffer('12.34', 1, 3)), 2.3)
+
     def check_conversion_to_int(self, x):
         """Check that int(x) has the correct value and type, for a float x."""
         n = int(x)
@@ -170,6 +209,15 @@
                 return ""
         self.assertRaises(TypeError, time.sleep, Foo5())
 
+        # Issue #24731
+        class F:
+            def __float__(self):
+                return OtherFloatSubclass(42.)
+        self.assertAlmostEqual(float(F()), 42.)
+        self.assertIs(type(float(F())), OtherFloatSubclass)
+        self.assertAlmostEqual(FloatSubclass(F()), 42.)
+        self.assertIs(type(FloatSubclass(F())), FloatSubclass)
+
     def test_is_integer(self):
         self.assertFalse((1.1).is_integer())
         self.assertTrue((1.).is_integer())
diff --git a/lib/python2.7/test/test_functools.py b/lib/python2.7/test/test_functools.py
index 7b3cb96..69176f4 100644
--- a/lib/python2.7/test/test_functools.py
+++ b/lib/python2.7/test/test_functools.py
@@ -1,3 +1,4 @@
+import copy
 import functools
 import sys
 import unittest
@@ -25,6 +26,16 @@
     """ return the signature of a partial object """
     return (part.func, part.args, part.keywords, part.__dict__)
 
+class MyTuple(tuple):
+    pass
+
+class BadTuple(tuple):
+    def __add__(self, other):
+        return list(self) + list(other)
+
+class MyDict(dict):
+    pass
+
 class TestPartial(unittest.TestCase):
 
     thetype = functools.partial
@@ -146,11 +157,84 @@
         self.assertEqual(join(data), '0123456789')
 
     def test_pickle(self):
-        f = self.thetype(signature, 'asdf', bar=True)
-        f.add_something_to__dict__ = True
+        f = self.thetype(signature, ['asdf'], bar=[True])
+        f.attr = []
         for proto in range(pickle.HIGHEST_PROTOCOL + 1):
             f_copy = pickle.loads(pickle.dumps(f, proto))
-            self.assertEqual(signature(f), signature(f_copy))
+            self.assertEqual(signature(f_copy), signature(f))
+
+    def test_copy(self):
+        f = self.thetype(signature, ['asdf'], bar=[True])
+        f.attr = []
+        f_copy = copy.copy(f)
+        self.assertEqual(signature(f_copy), signature(f))
+        self.assertIs(f_copy.attr, f.attr)
+        self.assertIs(f_copy.args, f.args)
+        self.assertIs(f_copy.keywords, f.keywords)
+
+    def test_deepcopy(self):
+        f = self.thetype(signature, ['asdf'], bar=[True])
+        f.attr = []
+        f_copy = copy.deepcopy(f)
+        self.assertEqual(signature(f_copy), signature(f))
+        self.assertIsNot(f_copy.attr, f.attr)
+        self.assertIsNot(f_copy.args, f.args)
+        self.assertIsNot(f_copy.args[0], f.args[0])
+        self.assertIsNot(f_copy.keywords, f.keywords)
+        self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
+
+    def test_setstate(self):
+        f = self.thetype(signature)
+        f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
+        self.assertEqual(signature(f),
+                         (capture, (1,), dict(a=10), dict(attr=[])))
+        self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
+
+        f.__setstate__((capture, (1,), dict(a=10), None))
+        self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
+        self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
+
+        f.__setstate__((capture, (1,), None, None))
+        #self.assertEqual(signature(f), (capture, (1,), {}, {}))
+        self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
+        self.assertEqual(f(2), ((1, 2), {}))
+        self.assertEqual(f(), ((1,), {}))
+
+        f.__setstate__((capture, (), {}, None))
+        self.assertEqual(signature(f), (capture, (), {}, {}))
+        self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
+        self.assertEqual(f(2), ((2,), {}))
+        self.assertEqual(f(), ((), {}))
+
+    def test_setstate_errors(self):
+        f = self.thetype(signature)
+        self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
+        self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
+        self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
+        self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
+        self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
+        self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
+        self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
+
+    def test_setstate_subclasses(self):
+        f = self.thetype(signature)
+        f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
+        s = signature(f)
+        self.assertEqual(s, (capture, (1,), dict(a=10), {}))
+        self.assertIs(type(s[1]), tuple)
+        self.assertIs(type(s[2]), dict)
+        r = f()
+        self.assertEqual(r, ((1,), {'a': 10}))
+        self.assertIs(type(r[0]), tuple)
+        self.assertIs(type(r[1]), dict)
+
+        f.__setstate__((capture, BadTuple((1,)), {}, None))
+        s = signature(f)
+        self.assertEqual(s, (capture, (1,), {}, {}))
+        self.assertIs(type(s[1]), tuple)
+        r = f(2)
+        self.assertEqual(r, ((1, 2), {}))
+        self.assertIs(type(r[0]), tuple)
 
     # Issue 6083: Reference counting bug
     def test_setstate_refcount(self):
@@ -167,7 +251,7 @@
                 raise IndexError
 
         f = self.thetype(object)
-        self.assertRaises(SystemError, f.__setstate__, BadSequence())
+        self.assertRaises(TypeError, f.__setstate__, BadSequence())
 
 class PartialSubclass(functools.partial):
     pass
@@ -181,7 +265,14 @@
     thetype = PythonPartial
 
     # the python version isn't picklable
-    test_pickle = test_setstate_refcount = None
+    test_pickle = None
+    test_setstate = None
+    test_setstate_errors = None
+    test_setstate_subclasses = None
+    test_setstate_refcount = None
+
+    # the python version isn't deepcopyable
+    test_deepcopy = None
 
     # the python version isn't a type
     test_attributes = None
diff --git a/lib/python2.7/test/test_gdb.py b/lib/python2.7/test/test_gdb.py
index 9656010..f157eae 100644
--- a/lib/python2.7/test/test_gdb.py
+++ b/lib/python2.7/test/test_gdb.py
@@ -10,21 +10,43 @@
 import unittest
 import sysconfig
 
+from test import test_support
 from test.test_support import run_unittest, findfile
 
+# Is this Python configured to support threads?
 try:
-    gdb_version, _ = subprocess.Popen(["gdb", "-nx", "--version"],
-                                      stdout=subprocess.PIPE).communicate()
-except OSError:
-    # This is what "no gdb" looks like.  There may, however, be other
-    # errors that manifest this way too.
-    raise unittest.SkipTest("Couldn't find gdb on the path")
-gdb_version_number = re.search("^GNU gdb [^\d]*(\d+)\.(\d)", gdb_version)
-gdb_major_version = int(gdb_version_number.group(1))
-gdb_minor_version = int(gdb_version_number.group(2))
+    import thread
+except ImportError:
+    thread = None
+
+def get_gdb_version():
+    try:
+        proc = subprocess.Popen(["gdb", "-nx", "--version"],
+                                stdout=subprocess.PIPE,
+                                universal_newlines=True)
+        version = proc.communicate()[0]
+    except OSError:
+        # This is what "no gdb" looks like.  There may, however, be other
+        # errors that manifest this way too.
+        raise unittest.SkipTest("Couldn't find gdb on the path")
+
+    # Regex to parse:
+    # 'GNU gdb (GDB; SUSE Linux Enterprise 12) 7.7\n' -> 7.7
+    # 'GNU gdb (GDB) Fedora 7.9.1-17.fc22\n' -> 7.9
+    # 'GNU gdb 6.1.1 [FreeBSD]\n' -> 6.1
+    # 'GNU gdb (GDB) Fedora (7.5.1-37.fc18)\n' -> 7.5
+    match = re.search(r"^GNU gdb.*?\b(\d+)\.(\d+)", version)
+    if match is None:
+        raise Exception("unable to parse GDB version: %r" % version)
+    return (version, int(match.group(1)), int(match.group(2)))
+
+gdb_version, gdb_major_version, gdb_minor_version = get_gdb_version()
 if gdb_major_version < 7:
-    raise unittest.SkipTest("gdb versions before 7.0 didn't support python embedding"
-                            " Saw:\n" + gdb_version)
+    raise unittest.SkipTest("gdb versions before 7.0 didn't support python "
+                            "embedding. Saw %s.%s:\n%s"
+                            % (gdb_major_version, gdb_minor_version,
+                               gdb_version))
+
 if sys.platform.startswith("sunos"):
     raise unittest.SkipTest("test doesn't work very well on Solaris")
 
@@ -49,6 +71,8 @@
     if (gdb_major_version, gdb_minor_version) >= (7, 4):
         base_cmd += ('-iex', 'add-auto-load-safe-path ' + checkout_hook_path)
     out, err = subprocess.Popen(base_cmd + args,
+        # Redirect stdin to prevent GDB from messing with terminal settings
+        stdin=subprocess.PIPE,
         stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env,
         ).communicate()
     return out, err
@@ -713,20 +737,133 @@
 class PyBtTests(DebuggerTests):
     @unittest.skipIf(python_is_optimized(),
                      "Python was compiled with optimizations")
-    def test_basic_command(self):
+    def test_bt(self):
         'Verify that the "py-bt" command works'
         bt = self.get_stack_trace(script=self.get_sample_script(),
                                   cmds_after_breakpoint=['py-bt'])
         self.assertMultilineMatches(bt,
                                     r'''^.*
-#[0-9]+ Frame 0x[0-9a-f]+, for file .*gdb_sample.py, line 7, in bar \(a=1, b=2, c=3\)
+Traceback \(most recent call first\):
+  File ".*gdb_sample.py", line 10, in baz
+    print\(42\)
+  File ".*gdb_sample.py", line 7, in bar
     baz\(a, b, c\)
-#[0-9]+ Frame 0x[0-9a-f]+, for file .*gdb_sample.py, line 4, in foo \(a=1, b=2, c=3\)
+  File ".*gdb_sample.py", line 4, in foo
     bar\(a, b, c\)
-#[0-9]+ Frame 0x[0-9a-f]+, for file .*gdb_sample.py, line 12, in <module> \(\)
+  File ".*gdb_sample.py", line 12, in <module>
     foo\(1, 2, 3\)
 ''')
 
+    @unittest.skipIf(python_is_optimized(),
+                     "Python was compiled with optimizations")
+    def test_bt_full(self):
+        'Verify that the "py-bt-full" command works'
+        bt = self.get_stack_trace(script=self.get_sample_script(),
+                                  cmds_after_breakpoint=['py-bt-full'])
+        self.assertMultilineMatches(bt,
+                                    r'''^.*
+#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 7, in bar \(a=1, b=2, c=3\)
+    baz\(a, b, c\)
+#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 4, in foo \(a=1, b=2, c=3\)
+    bar\(a, b, c\)
+#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 12, in <module> \(\)
+    foo\(1, 2, 3\)
+''')
+
+    @unittest.skipUnless(thread,
+                         "Python was compiled without thread support")
+    def test_threads(self):
+        'Verify that "py-bt" indicates threads that are waiting for the GIL'
+        cmd = '''
+from threading import Thread
+
+class TestThread(Thread):
+    # These threads would run forever, but we'll interrupt things with the
+    # debugger
+    def run(self):
+        i = 0
+        while 1:
+             i += 1
+
+t = {}
+for i in range(4):
+   t[i] = TestThread()
+   t[i].start()
+
+# Trigger a breakpoint on the main thread
+print 42
+
+'''
+        # Verify with "py-bt":
+        gdb_output = self.get_stack_trace(cmd,
+                                          cmds_after_breakpoint=['thread apply all py-bt'])
+        self.assertIn('Waiting for the GIL', gdb_output)
+
+        # Verify with "py-bt-full":
+        gdb_output = self.get_stack_trace(cmd,
+                                          cmds_after_breakpoint=['thread apply all py-bt-full'])
+        self.assertIn('Waiting for the GIL', gdb_output)
+
+    @unittest.skipIf(python_is_optimized(),
+                     "Python was compiled with optimizations")
+    # Some older versions of gdb will fail with
+    #  "Cannot find new threads: generic error"
+    # unless we add LD_PRELOAD=PATH-TO-libpthread.so.1 as a workaround
+    @unittest.skipUnless(thread,
+                         "Python was compiled without thread support")
+    def test_gc(self):
+        'Verify that "py-bt" indicates if a thread is garbage-collecting'
+        cmd = ('from gc import collect\n'
+               'print 42\n'
+               'def foo():\n'
+               '    collect()\n'
+               'def bar():\n'
+               '    foo()\n'
+               'bar()\n')
+        # Verify with "py-bt":
+        gdb_output = self.get_stack_trace(cmd,
+                                          cmds_after_breakpoint=['break update_refs', 'continue', 'py-bt'],
+                                          )
+        self.assertIn('Garbage-collecting', gdb_output)
+
+        # Verify with "py-bt-full":
+        gdb_output = self.get_stack_trace(cmd,
+                                          cmds_after_breakpoint=['break update_refs', 'continue', 'py-bt-full'],
+                                          )
+        self.assertIn('Garbage-collecting', gdb_output)
+
+    @unittest.skipIf(python_is_optimized(),
+                     "Python was compiled with optimizations")
+    # Some older versions of gdb will fail with
+    #  "Cannot find new threads: generic error"
+    # unless we add LD_PRELOAD=PATH-TO-libpthread.so.1 as a workaround
+    @unittest.skipUnless(thread,
+                         "Python was compiled without thread support")
+    def test_pycfunction(self):
+        'Verify that "py-bt" displays invocations of PyCFunction instances'
+        # Tested function must not be defined with METH_NOARGS or METH_O,
+        # otherwise call_function() doesn't call PyCFunction_Call()
+        cmd = ('from time import gmtime\n'
+               'def foo():\n'
+               '    gmtime(1)\n'
+               'def bar():\n'
+               '    foo()\n'
+               'bar()\n')
+        # Verify with "py-bt":
+        gdb_output = self.get_stack_trace(cmd,
+                                          breakpoint='time_gmtime',
+                                          cmds_after_breakpoint=['bt', 'py-bt'],
+                                          )
+        self.assertIn('<built-in function gmtime', gdb_output)
+
+        # Verify with "py-bt-full":
+        gdb_output = self.get_stack_trace(cmd,
+                                          breakpoint='time_gmtime',
+                                          cmds_after_breakpoint=['py-bt-full'],
+                                          )
+        self.assertIn('#0 <built-in function gmtime', gdb_output)
+
+
 class PyPrintTests(DebuggerTests):
     @unittest.skipIf(python_is_optimized(),
                      "Python was compiled with optimizations")
@@ -781,6 +918,10 @@
                                     r".*\na = 1\nb = 2\nc = 3\n.*")
 
 def test_main():
+    if test_support.verbose:
+        print("GDB version %s.%s:" % (gdb_major_version, gdb_minor_version))
+        for line in gdb_version.splitlines():
+            print(" " * 4 + line)
     run_unittest(PrettyPrintTests,
                  PyListTests,
                  StackNavigationTests,
diff --git a/lib/python2.7/test/test_getargs2.py b/lib/python2.7/test/test_getargs2.py
index aba304a..e42f5ff 100644
--- a/lib/python2.7/test/test_getargs2.py
+++ b/lib/python2.7/test/test_getargs2.py
@@ -331,9 +331,293 @@
         else:
             self.fail('TypeError should have been raised')
 
+
+class Bytes_TestCase(unittest.TestCase):
+    def test_c(self):
+        from _testcapi import getargs_c
+        self.assertRaises(TypeError, getargs_c, 'abc')  # len > 1
+        self.assertEqual(getargs_c('a'), 97)
+        if test_support.have_unicode:
+            self.assertRaises(TypeError, getargs_c, u's')
+        self.assertRaises(TypeError, getargs_c, bytearray('a'))
+        self.assertRaises(TypeError, getargs_c, memoryview('a'))
+        with test_support.check_py3k_warnings():
+            self.assertRaises(TypeError, getargs_c, buffer('a'))
+        self.assertRaises(TypeError, getargs_c, 97)
+        self.assertRaises(TypeError, getargs_c, None)
+
+    def test_w(self):
+        from _testcapi import getargs_w
+        self.assertRaises(TypeError, getargs_w, 'abc', 3)
+        self.assertRaises(TypeError, getargs_w, u'abc', 3)
+        self.assertRaises(TypeError, getargs_w, bytearray('bytes'), 3)
+        self.assertRaises(TypeError, getargs_w, memoryview('bytes'), 3)
+        self.assertRaises(TypeError, getargs_w,
+                          memoryview(bytearray('bytes')), 3)
+        with test_support.check_py3k_warnings():
+            self.assertRaises(TypeError, getargs_w, buffer('bytes'), 3)
+            self.assertRaises(TypeError, getargs_w,
+                              buffer(bytearray('bytes')), 3)
+        self.assertRaises(TypeError, getargs_w, None, 0)
+
+    def test_w_hash(self):
+        from _testcapi import getargs_w_hash
+        self.assertRaises(TypeError, getargs_w_hash, 'abc')
+        self.assertRaises(TypeError, getargs_w_hash, u'abc')
+        self.assertRaises(TypeError, getargs_w_hash, bytearray('bytes'))
+        self.assertRaises(TypeError, getargs_w_hash, memoryview('bytes'))
+        self.assertRaises(TypeError, getargs_w_hash,
+                          memoryview(bytearray('bytes')))
+        with test_support.check_py3k_warnings():
+            self.assertRaises(TypeError, getargs_w_hash, buffer('bytes'))
+            self.assertRaises(TypeError, getargs_w_hash,
+                              buffer(bytearray('bytes')))
+        self.assertRaises(TypeError, getargs_w_hash, None)
+
+    def test_w_star(self):
+        # getargs_w_star() modifies first and last byte
+        from _testcapi import getargs_w_star
+        self.assertRaises(TypeError, getargs_w_star, 'abc')
+        self.assertRaises(TypeError, getargs_w_star, u'abc')
+        self.assertRaises(TypeError, getargs_w_star, memoryview('bytes'))
+        buf = bytearray('bytearray')
+        self.assertEqual(getargs_w_star(buf), '[ytearra]')
+        self.assertEqual(buf, bytearray('[ytearra]'))
+        buf = bytearray(b'memoryview')
+        self.assertEqual(getargs_w_star(memoryview(buf)), '[emoryvie]')
+        self.assertEqual(buf, bytearray('[emoryvie]'))
+        with test_support.check_py3k_warnings():
+            self.assertRaises(TypeError, getargs_w_star, buffer('buffer'))
+            self.assertRaises(TypeError, getargs_w_star,
+                              buffer(bytearray('buffer')))
+        self.assertRaises(TypeError, getargs_w_star, None)
+
+
+class String_TestCase(unittest.TestCase):
+    def test_s(self):
+        from _testcapi import getargs_s
+        self.assertEqual(getargs_s('abc\xe9'), 'abc\xe9')
+        self.assertEqual(getargs_s(u'abc'), 'abc')
+        self.assertRaises(TypeError, getargs_s, 'nul:\0')
+        self.assertRaises(TypeError, getargs_s, u'nul:\0')
+        self.assertRaises(TypeError, getargs_s, bytearray('bytearray'))
+        self.assertRaises(TypeError, getargs_s, memoryview('memoryview'))
+        with test_support.check_py3k_warnings():
+            self.assertRaises(TypeError, getargs_s, buffer('buffer'))
+        self.assertRaises(TypeError, getargs_s, None)
+
+    def test_s_star(self):
+        from _testcapi import getargs_s_star
+        self.assertEqual(getargs_s_star('abc\xe9'), 'abc\xe9')
+        self.assertEqual(getargs_s_star(u'abc'), 'abc')
+        self.assertEqual(getargs_s_star('nul:\0'), 'nul:\0')
+        self.assertEqual(getargs_s_star(u'nul:\0'), 'nul:\0')
+        self.assertEqual(getargs_s_star(bytearray('abc\xe9')), 'abc\xe9')
+        self.assertEqual(getargs_s_star(memoryview('abc\xe9')), 'abc\xe9')
+        with test_support.check_py3k_warnings():
+            self.assertEqual(getargs_s_star(buffer('abc\xe9')), 'abc\xe9')
+            self.assertEqual(getargs_s_star(buffer(u'abc\xe9')),
+                             str(buffer(u'abc\xe9')))
+        self.assertRaises(TypeError, getargs_s_star, None)
+
+    def test_s_hash(self):
+        from _testcapi import getargs_s_hash
+        self.assertEqual(getargs_s_hash('abc\xe9'), 'abc\xe9')
+        self.assertEqual(getargs_s_hash(u'abc'), 'abc')
+        self.assertEqual(getargs_s_hash('nul:\0'), 'nul:\0')
+        self.assertEqual(getargs_s_hash(u'nul:\0'), 'nul:\0')
+        self.assertRaises(TypeError, getargs_s_hash, bytearray('bytearray'))
+        self.assertRaises(TypeError, getargs_s_hash, memoryview('memoryview'))
+        with test_support.check_py3k_warnings():
+            self.assertEqual(getargs_s_hash(buffer('abc\xe9')), 'abc\xe9')
+            self.assertEqual(getargs_s_hash(buffer(u'abc\xe9')),
+                             str(buffer(u'abc\xe9')))
+        self.assertRaises(TypeError, getargs_s_hash, None)
+
+    def test_t_hash(self):
+        from _testcapi import getargs_t_hash
+        self.assertEqual(getargs_t_hash('abc\xe9'), 'abc\xe9')
+        self.assertEqual(getargs_t_hash(u'abc'), 'abc')
+        self.assertEqual(getargs_t_hash('nul:\0'), 'nul:\0')
+        self.assertEqual(getargs_t_hash(u'nul:\0'), 'nul:\0')
+        self.assertRaises(TypeError, getargs_t_hash, bytearray('bytearray'))
+        self.assertRaises(TypeError, getargs_t_hash, memoryview('memoryview'))
+        with test_support.check_py3k_warnings():
+            self.assertEqual(getargs_t_hash(buffer('abc\xe9')), 'abc\xe9')
+            self.assertEqual(getargs_t_hash(buffer(u'abc')), 'abc')
+        self.assertRaises(TypeError, getargs_t_hash, None)
+
+    def test_z(self):
+        from _testcapi import getargs_z
+        self.assertEqual(getargs_z('abc\xe9'), 'abc\xe9')
+        self.assertEqual(getargs_z(u'abc'), 'abc')
+        self.assertRaises(TypeError, getargs_z, 'nul:\0')
+        self.assertRaises(TypeError, getargs_z, u'nul:\0')
+        self.assertRaises(TypeError, getargs_z, bytearray('bytearray'))
+        self.assertRaises(TypeError, getargs_z, memoryview('memoryview'))
+        with test_support.check_py3k_warnings():
+            self.assertRaises(TypeError, getargs_z, buffer('buffer'))
+        self.assertIsNone(getargs_z(None))
+
+    def test_z_star(self):
+        from _testcapi import getargs_z_star
+        self.assertEqual(getargs_z_star('abc\xe9'), 'abc\xe9')
+        self.assertEqual(getargs_z_star(u'abc'), 'abc')
+        self.assertEqual(getargs_z_star('nul:\0'), 'nul:\0')
+        self.assertEqual(getargs_z_star(u'nul:\0'), 'nul:\0')
+        self.assertEqual(getargs_z_star(bytearray('abc\xe9')), 'abc\xe9')
+        self.assertEqual(getargs_z_star(memoryview('abc\xe9')), 'abc\xe9')
+        with test_support.check_py3k_warnings():
+            self.assertEqual(getargs_z_star(buffer('abc\xe9')), 'abc\xe9')
+            self.assertEqual(getargs_z_star(buffer(u'abc\xe9')),
+                             str(buffer(u'abc\xe9')))
+        self.assertIsNone(getargs_z_star(None))
+
+    def test_z_hash(self):
+        from _testcapi import getargs_z_hash
+        self.assertEqual(getargs_z_hash('abc\xe9'), 'abc\xe9')
+        self.assertEqual(getargs_z_hash(u'abc'), 'abc')
+        self.assertEqual(getargs_z_hash('nul:\0'), 'nul:\0')
+        self.assertEqual(getargs_z_hash(u'nul:\0'), 'nul:\0')
+        self.assertRaises(TypeError, getargs_z_hash, bytearray('bytearray'))
+        self.assertRaises(TypeError, getargs_z_hash, memoryview('memoryview'))
+        with test_support.check_py3k_warnings():
+            self.assertEqual(getargs_z_hash(buffer('abc\xe9')), 'abc\xe9')
+            self.assertEqual(getargs_z_hash(buffer(u'abc\xe9')),
+                             str(buffer(u'abc\xe9')))
+        self.assertIsNone(getargs_z_hash(None))
+
+
+@test_support.requires_unicode
+class Unicode_TestCase(unittest.TestCase):
+    def test_es(self):
+        from _testcapi import getargs_es
+        self.assertEqual(getargs_es('abc'), 'abc')
+        self.assertEqual(getargs_es(u'abc'), 'abc')
+        self.assertEqual(getargs_es('abc', 'ascii'), 'abc')
+        self.assertEqual(getargs_es(u'abc\xe9', 'latin1'), 'abc\xe9')
+        self.assertRaises(UnicodeEncodeError, getargs_es, u'abc\xe9', 'ascii')
+        self.assertRaises(LookupError, getargs_es, u'abc', 'spam')
+        self.assertRaises(TypeError, getargs_es,
+                          bytearray('bytearray'), 'latin1')
+        self.assertRaises(TypeError, getargs_es,
+                          memoryview('memoryview'), 'latin1')
+        with test_support.check_py3k_warnings():
+            self.assertEqual(getargs_es(buffer('abc'), 'ascii'), 'abc')
+            self.assertEqual(getargs_es(buffer(u'abc'), 'ascii'), 'abc')
+        self.assertRaises(TypeError, getargs_es, None, 'latin1')
+        self.assertRaises(TypeError, getargs_es, 'nul:\0', 'latin1')
+        self.assertRaises(TypeError, getargs_es, u'nul:\0', 'latin1')
+
+    def test_et(self):
+        from _testcapi import getargs_et
+        self.assertEqual(getargs_et('abc\xe9'), 'abc\xe9')
+        self.assertEqual(getargs_et(u'abc'), 'abc')
+        self.assertEqual(getargs_et('abc', 'ascii'), 'abc')
+        self.assertEqual(getargs_et('abc\xe9', 'ascii'), 'abc\xe9')
+        self.assertEqual(getargs_et(u'abc\xe9', 'latin1'), 'abc\xe9')
+        self.assertRaises(UnicodeEncodeError, getargs_et, u'abc\xe9', 'ascii')
+        self.assertRaises(LookupError, getargs_et, u'abc', 'spam')
+        self.assertRaises(TypeError, getargs_et,
+                          bytearray('bytearray'), 'latin1')
+        self.assertRaises(TypeError, getargs_et,
+                          memoryview('memoryview'), 'latin1')
+        with test_support.check_py3k_warnings():
+            self.assertEqual(getargs_et(buffer('abc'), 'ascii'), 'abc')
+            self.assertEqual(getargs_et(buffer(u'abc'), 'ascii'), 'abc')
+        self.assertRaises(TypeError, getargs_et, None, 'latin1')
+        self.assertRaises(TypeError, getargs_et, 'nul:\0', 'latin1')
+        self.assertRaises(TypeError, getargs_et, u'nul:\0', 'latin1')
+
+    def test_es_hash(self):
+        from _testcapi import getargs_es_hash
+        self.assertEqual(getargs_es_hash('abc'), 'abc')
+        self.assertEqual(getargs_es_hash(u'abc'), 'abc')
+        self.assertEqual(getargs_es_hash(u'abc\xe9', 'latin1'), 'abc\xe9')
+        self.assertRaises(UnicodeEncodeError, getargs_es_hash, u'abc\xe9', 'ascii')
+        self.assertRaises(LookupError, getargs_es_hash, u'abc', 'spam')
+        self.assertRaises(TypeError, getargs_es_hash,
+                          bytearray('bytearray'), 'latin1')
+        self.assertRaises(TypeError, getargs_es_hash,
+                          memoryview('memoryview'), 'latin1')
+        with test_support.check_py3k_warnings():
+            self.assertEqual(getargs_es_hash(buffer('abc'), 'ascii'), 'abc')
+            self.assertEqual(getargs_es_hash(buffer(u'abc'), 'ascii'), 'abc')
+        self.assertRaises(TypeError, getargs_es_hash, None, 'latin1')
+        self.assertEqual(getargs_es_hash('nul:\0', 'latin1'), 'nul:\0')
+        self.assertEqual(getargs_es_hash(u'nul:\0', 'latin1'), 'nul:\0')
+
+        buf = bytearray('x'*8)
+        self.assertEqual(getargs_es_hash(u'abc\xe9', 'latin1', buf), 'abc\xe9')
+        self.assertEqual(buf, bytearray('abc\xe9\x00xxx'))
+        buf = bytearray('x'*5)
+        self.assertEqual(getargs_es_hash(u'abc\xe9', 'latin1', buf), 'abc\xe9')
+        self.assertEqual(buf, bytearray('abc\xe9\x00'))
+        buf = bytearray('x'*4)
+        self.assertRaises(TypeError, getargs_es_hash, u'abc\xe9', 'latin1', buf)
+        self.assertEqual(buf, bytearray('x'*4))
+        buf = bytearray()
+        self.assertRaises(TypeError, getargs_es_hash, u'abc\xe9', 'latin1', buf)
+
+    def test_et_hash(self):
+        from _testcapi import getargs_et_hash
+        self.assertEqual(getargs_et_hash('abc\xe9'), 'abc\xe9')
+        self.assertEqual(getargs_et_hash(u'abc'), 'abc')
+        self.assertEqual(getargs_et_hash('abc\xe9', 'ascii'), 'abc\xe9')
+        self.assertEqual(getargs_et_hash(u'abc\xe9', 'latin1'), 'abc\xe9')
+        self.assertRaises(UnicodeEncodeError, getargs_et_hash,
+                          u'abc\xe9', 'ascii')
+        self.assertRaises(LookupError, getargs_et_hash, u'abc', 'spam')
+        self.assertRaises(TypeError, getargs_et_hash,
+                          bytearray('bytearray'), 'latin1')
+        self.assertRaises(TypeError, getargs_et_hash,
+                          memoryview('memoryview'), 'latin1')
+        with test_support.check_py3k_warnings():
+            self.assertEqual(getargs_et_hash(buffer('abc'), 'ascii'), 'abc')
+            self.assertEqual(getargs_et_hash(buffer(u'abc'), 'ascii'), 'abc')
+        self.assertRaises(TypeError, getargs_et_hash, None, 'latin1')
+        self.assertEqual(getargs_et_hash('nul:\0', 'latin1'), 'nul:\0')
+        self.assertEqual(getargs_et_hash(u'nul:\0', 'latin1'), 'nul:\0')
+
+        buf = bytearray('x'*8)
+        self.assertEqual(getargs_et_hash(u'abc\xe9', 'latin1', buf), 'abc\xe9')
+        self.assertEqual(buf, bytearray('abc\xe9\x00xxx'))
+        buf = bytearray('x'*5)
+        self.assertEqual(getargs_et_hash(u'abc\xe9', 'latin1', buf), 'abc\xe9')
+        self.assertEqual(buf, bytearray('abc\xe9\x00'))
+        buf = bytearray('x'*4)
+        self.assertRaises(TypeError, getargs_et_hash, u'abc\xe9', 'latin1', buf)
+        self.assertEqual(buf, bytearray('x'*4))
+        buf = bytearray()
+        self.assertRaises(TypeError, getargs_et_hash, u'abc\xe9', 'latin1', buf)
+
+    def test_u(self):
+        from _testcapi import getargs_u
+        self.assertEqual(getargs_u(u'abc\xe9'), u'abc\xe9')
+        self.assertEqual(getargs_u(u'nul:\0'), u'nul:')
+        self.assertRaises(TypeError, getargs_u, 'bytes')
+        self.assertRaises(TypeError, getargs_u, bytearray('bytearray'))
+        self.assertRaises(TypeError, getargs_u, memoryview('memoryview'))
+        with test_support.check_py3k_warnings():
+            self.assertRaises(TypeError, getargs_u, buffer('buffer'))
+        self.assertRaises(TypeError, getargs_u, None)
+
+    def test_u_hash(self):
+        from _testcapi import getargs_u_hash
+        self.assertEqual(getargs_u_hash(u'abc\xe9'), u'abc\xe9')
+        self.assertEqual(getargs_u_hash(u'nul:\0'), u'nul:\0')
+        self.assertRaises(TypeError, getargs_u_hash, 'bytes')
+        self.assertRaises(TypeError, getargs_u_hash, bytearray('bytearray'))
+        self.assertRaises(TypeError, getargs_u_hash, memoryview('memoryview'))
+        with test_support.check_py3k_warnings():
+            self.assertRaises(TypeError, getargs_u_hash, buffer('buffer'))
+        self.assertRaises(TypeError, getargs_u_hash, None)
+
+
 def test_main():
     tests = [Signed_TestCase, Unsigned_TestCase, LongLong_TestCase,
-             Tuple_TestCase, Keywords_TestCase]
+             Tuple_TestCase, Keywords_TestCase,
+             Bytes_TestCase, String_TestCase, Unicode_TestCase]
     test_support.run_unittest(*tests)
 
 if __name__ == "__main__":
diff --git a/lib/python2.7/test/test_hotshot.py b/lib/python2.7/test/test_hotshot.py
index 7da9746..9f4b798 100644
--- a/lib/python2.7/test/test_hotshot.py
+++ b/lib/python2.7/test/test_hotshot.py
@@ -149,6 +149,10 @@
         stats.load(self.logfn)
         os.unlink(self.logfn)
 
+    def test_large_info(self):
+        p = self.new_profiler()
+        self.assertRaises(ValueError, p.addinfo, "A", "A" * 0xfceb)
+
 
 def test_main():
     test_support.run_unittest(HotShotTestCase)
diff --git a/lib/python2.7/test/test_httplib.py b/lib/python2.7/test/test_httplib.py
index fc7c571..a72f6f7 100644
--- a/lib/python2.7/test/test_httplib.py
+++ b/lib/python2.7/test/test_httplib.py
@@ -5,6 +5,7 @@
 import socket
 import errno
 import os
+import tempfile
 
 import unittest
 TestCase = unittest.TestCase
@@ -399,6 +400,22 @@
         conn.sock = sock
         conn.request('GET', '/foo', body)
         self.assertTrue(sock.data.startswith(expected))
+        self.assertIn('def test_send_file', sock.data)
+
+    def test_send_tempfile(self):
+        expected = ('GET /foo HTTP/1.1\r\nHost: example.com\r\n'
+                    'Accept-Encoding: identity\r\nContent-Length: 9\r\n\r\n'
+                    'fake\ndata')
+
+        with tempfile.TemporaryFile() as body:
+            body.write('fake\ndata')
+            body.seek(0)
+
+            conn = httplib.HTTPConnection('example.com')
+            sock = FakeSocket(body)
+            conn.sock = sock
+            conn.request('GET', '/foo', body)
+        self.assertEqual(sock.data, expected)
 
     def test_send(self):
         expected = 'this is a test this is only a test'
@@ -561,6 +578,16 @@
         #self.assertTrue(response[0].closed)
         self.assertTrue(conn.sock.file_closed)
 
+    def test_proxy_tunnel_without_status_line(self):
+        # Issue 17849: If a proxy tunnel is created that does not return
+        # a status code, fail.
+        body = 'hello world'
+        conn = httplib.HTTPConnection('example.com', strict=False)
+        conn.set_tunnel('foo')
+        conn.sock = FakeSocket(body)
+        with self.assertRaisesRegexp(socket.error, "Invalid response"):
+            conn._tunnel()
+
 class OfflineTest(TestCase):
     def test_responses(self):
         self.assertEqual(httplib.responses[httplib.NOT_FOUND], "Not Found")
@@ -825,10 +852,12 @@
 
         self.assertEqual(conn.sock.host, 'proxy.com')
         self.assertEqual(conn.sock.port, 80)
-        self.assertTrue('CONNECT destination.com' in conn.sock.data)
-        self.assertTrue('Host: destination.com' in conn.sock.data)
+        self.assertIn('CONNECT destination.com', conn.sock.data)
+        # issue22095
+        self.assertNotIn('Host: destination.com:None', conn.sock.data)
+        self.assertIn('Host: destination.com', conn.sock.data)
 
-        self.assertTrue('Host: proxy.com' not in conn.sock.data)
+        self.assertNotIn('Host: proxy.com', conn.sock.data)
 
         conn.close()
 
diff --git a/lib/python2.7/test/test_httpservers.py b/lib/python2.7/test/test_httpservers.py
index 706dfc7..c84f48f 100644
--- a/lib/python2.7/test/test_httpservers.py
+++ b/lib/python2.7/test/test_httpservers.py
@@ -381,6 +381,16 @@
                           form.getfirst("bacon"))
 """
 
+cgi_file4 = """\
+#!%s
+import os
+
+print("Content-type: text/html")
+print()
+
+print(os.environ["%s"])
+"""
+
 
 @unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
         "This test can't be run reliably as root (issue #13308).")
@@ -424,6 +434,11 @@
             file3.write(cgi_file1 % self.pythonexe)
         os.chmod(self.file3_path, 0777)
 
+        self.file4_path = os.path.join(self.cgi_dir, 'file4.py')
+        with open(self.file4_path, 'w') as file4:
+            file4.write(cgi_file4 % (self.pythonexe, 'QUERY_STRING'))
+        os.chmod(self.file4_path, 0o777)
+
         self.cwd = os.getcwd()
         os.chdir(self.parent_dir)
 
@@ -436,6 +451,7 @@
             os.remove(self.file1_path)
             os.remove(self.file2_path)
             os.remove(self.file3_path)
+            os.remove(self.file4_path)
             os.rmdir(self.cgi_child_dir)
             os.rmdir(self.cgi_dir)
             os.rmdir(self.parent_dir)
@@ -536,6 +552,19 @@
         self.assertEqual((b'Hello World\n', 'text/html', 200),
                 (res.read(), res.getheader('Content-type'), res.status))
 
+    def test_query_with_multiple_question_mark(self):
+        res = self.request('/cgi-bin/file4.py?a=b?c=d')
+        self.assertEqual(
+            (b'a=b?c=d\n', 'text/html', 200),
+            (res.read(), res.getheader('Content-type'), res.status))
+
+    def test_query_with_continuous_slashes(self):
+        res = self.request('/cgi-bin/file4.py?k=aa%2F%2Fbb&//q//p//=//a//b//')
+        self.assertEqual(
+            (b'k=aa%2F%2Fbb&//q//p//=//a//b//\n',
+             'text/html', 200),
+            (res.read(), res.getheader('Content-type'), res.status))
+
 
 class SimpleHTTPRequestHandlerTestCase(unittest.TestCase):
     """ Test url parsing """
diff --git a/lib/python2.7/test/test_imageop.py b/lib/python2.7/test/test_imageop.py
index 31edbd1..9589bf2 100644
--- a/lib/python2.7/test/test_imageop.py
+++ b/lib/python2.7/test/test_imageop.py
@@ -61,7 +61,9 @@
         self.check("rgb82rgb")
         self.check("rgb2grey")
         self.check("grey2rgb")
-
+        # Issue #24264: Buffer overflow
+        with self.assertRaises(imageop.error):
+            imageop.grey2rgb('A'*256, 1, 129)
 
 def test_main():
 
diff --git a/lib/python2.7/test/test_inspect.py b/lib/python2.7/test/test_inspect.py
index 4130cd0..ecc04cb 100644
--- a/lib/python2.7/test/test_inspect.py
+++ b/lib/python2.7/test/test_inspect.py
@@ -8,7 +8,7 @@
 from UserList import UserList
 from UserDict import UserDict
 
-from test.test_support import run_unittest, check_py3k_warnings
+from test.test_support import run_unittest, check_py3k_warnings, have_unicode
 
 with check_py3k_warnings(
         ("tuple parameter unpacking has been removed", SyntaxWarning),
@@ -17,7 +17,10 @@
     from test import inspect_fodder2 as mod2
 
 # C module for test_findsource_binary
-import unicodedata
+try:
+    import unicodedata
+except ImportError:
+    unicodedata = None
 
 # Functions tested in this suite:
 # ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode,
@@ -798,7 +801,8 @@
             self.assertEqualException(f, '2, c=3')
             self.assertEqualException(f, '2, 3, c=4')
             self.assertEqualException(f, '2, c=4, b=3')
-            self.assertEqualException(f, '**{u"\u03c0\u03b9": 4}')
+            if have_unicode:
+                self.assertEqualException(f, '**{u"\u03c0\u03b9": 4}')
             # f got multiple values for keyword argument
             self.assertEqualException(f, '1, a=2')
             self.assertEqualException(f, '1, **{"a":2}')
diff --git a/lib/python2.7/test/test_int.py b/lib/python2.7/test/test_int.py
index 365f9a2..ea5c0e3 100644
--- a/lib/python2.7/test/test_int.py
+++ b/lib/python2.7/test/test_int.py
@@ -45,6 +45,9 @@
         (unichr(0x200), ValueError),
 ]
 
+class IntSubclass(int):
+    pass
+
 class IntLongCommonTests(object):
 
     """Mixin of test cases to share between both test_int and test_long."""
@@ -340,20 +343,40 @@
         # Test possible valid non-numeric types for x, including subclasses
         # of the allowed built-in types.
         class CustomStr(str): pass
-        values = ['100', CustomStr('100')]
+        class CustomByteArray(bytearray): pass
+        factories = [str, bytearray, CustomStr, CustomByteArray, buffer]
 
         if have_unicode:
             class CustomUnicode(unicode): pass
-            values += [unicode('100'), CustomUnicode(unicode('100'))]
+            factories += [unicode, CustomUnicode]
 
-        for x in values:
+        for f in factories:
+            with test_support.check_py3k_warnings(quiet=True):
+                x = f('100')
             msg = 'x has value %s and type %s' % (x, type(x).__name__)
             try:
                 self.assertEqual(int(x), 100, msg=msg)
-                self.assertEqual(int(x, 2), 4, msg=msg)
+                if isinstance(x, basestring):
+                    self.assertEqual(int(x, 2), 4, msg=msg)
             except TypeError, err:
                 raise AssertionError('For %s got TypeError: %s' %
                                      (type(x).__name__, err))
+            if not isinstance(x, basestring):
+                errmsg = "can't convert non-string"
+                with self.assertRaisesRegexp(TypeError, errmsg, msg=msg):
+                    int(x, 2)
+            errmsg = 'invalid literal'
+            with self.assertRaisesRegexp(ValueError, errmsg, msg=msg), \
+                 test_support.check_py3k_warnings(quiet=True):
+                int(f('A' * 0x10))
+
+    def test_int_buffer(self):
+        with test_support.check_py3k_warnings():
+            self.assertEqual(int(buffer('123', 1, 2)), 23)
+            self.assertEqual(int(buffer('123\x00', 1, 2)), 23)
+            self.assertEqual(int(buffer('123 ', 1, 2)), 23)
+            self.assertEqual(int(buffer('123A', 1, 2)), 23)
+            self.assertEqual(int(buffer('1234', 1, 2)), 23)
 
     def test_error_on_string_float_for_x(self):
         self.assertRaises(ValueError, int, '1.2')
@@ -460,6 +483,18 @@
                     self.fail("Failed to raise TypeError with %s" %
                               ((base, trunc_result_base),))
 
+                class TruncReturnsIntSubclass(base):
+                    def __trunc__(self):
+                        return True
+                good_int = TruncReturnsIntSubclass()
+                n = int(good_int)
+                self.assertEqual(n, 1)
+                self.assertIs(type(n), bool)
+                n = IntSubclass(good_int)
+                self.assertEqual(n, 1)
+                self.assertIs(type(n), IntSubclass)
+
+
 def test_main():
     run_unittest(IntTestCases)
 
diff --git a/lib/python2.7/test/test_io.py b/lib/python2.7/test/test_io.py
index bbc804b..34760c9 100644
--- a/lib/python2.7/test/test_io.py
+++ b/lib/python2.7/test/test_io.py
@@ -15,7 +15,7 @@
 ################################################################################
 # When writing tests for io, it's important to test both the C and Python
 # implementations. This is usually done by writing a base test that refers to
-# the type it is testing as a attribute. Then it provides custom subclasses to
+# the type it is testing as an attribute. Then it provides custom subclasses to
 # test both implementations. This file has lots of examples.
 ################################################################################
 
@@ -426,7 +426,7 @@
         with self.open(support.TESTFN, "ab") as f:
             self.assertEqual(f.tell(), 3)
         with self.open(support.TESTFN, "a") as f:
-            self.assertTrue(f.tell() > 0)
+            self.assertGreater(f.tell(), 0)
 
     def test_destructor(self):
         record = []
@@ -544,7 +544,7 @@
         wr = weakref.ref(f)
         del f
         support.gc_collect()
-        self.assertTrue(wr() is None, wr)
+        self.assertIsNone(wr(), wr)
         with self.open(support.TESTFN, "rb") as f:
             self.assertEqual(f.read(), b"abcxxx")
 
@@ -668,7 +668,7 @@
         del MyIO
         del obj
         support.gc_collect()
-        self.assertTrue(wr() is None, wr)
+        self.assertIsNone(wr(), wr)
 
 class PyIOTest(IOTest):
     test_array_writes = unittest.skip(
@@ -1062,7 +1062,7 @@
         wr = weakref.ref(f)
         del f
         support.gc_collect()
-        self.assertTrue(wr() is None, wr)
+        self.assertIsNone(wr(), wr)
 
     def test_args_error(self):
         # Issue #17275
@@ -1365,7 +1365,7 @@
         wr = weakref.ref(f)
         del f
         support.gc_collect()
-        self.assertTrue(wr() is None, wr)
+        self.assertIsNone(wr(), wr)
         with self.open(support.TESTFN, "rb") as f:
             self.assertEqual(f.read(), b"123xxx")
 
@@ -2001,6 +2001,15 @@
         t.__init__(self.MockRawIO())
         self.assertEqual(t.read(0), u'')
 
+    def test_non_text_encoding_codecs_are_rejected(self):
+        # Ensure the constructor complains if passed a codec that isn't
+        # marked as a text encoding
+        # http://bugs.python.org/issue20404
+        r = self.BytesIO()
+        b = self.BufferedWriter(r)
+        with support.check_py3k_warnings():
+            self.TextIOWrapper(b, encoding="hex_codec")
+
     def test_detach(self):
         r = self.BytesIO()
         b = self.BufferedWriter(r)
@@ -2054,7 +2063,7 @@
         t = self.TextIOWrapper(b, encoding="utf8")
         self.assertEqual(t.encoding, "utf8")
         t = self.TextIOWrapper(b)
-        self.assertTrue(t.encoding is not None)
+        self.assertIsNotNone(t.encoding)
         codecs.lookup(t.encoding)
 
     def test_encoding_errors_reading(self):
@@ -2617,19 +2626,39 @@
 
     def test_illegal_decoder(self):
         # Issue #17106
+        # Bypass the early encoding check added in issue 20404
+        def _make_illegal_wrapper():
+            quopri = codecs.lookup("quopri_codec")
+            quopri._is_text_encoding = True
+            try:
+                t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
+                                       newline='\n', encoding="quopri_codec")
+            finally:
+                quopri._is_text_encoding = False
+            return t
         # Crash when decoder returns non-string
-        t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
-                               encoding='quopri_codec')
+        with support.check_py3k_warnings():
+            t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
+                                   encoding='quopri_codec')
         with self.maybeRaises(TypeError):
             t.read(1)
-        t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
-                               encoding='quopri_codec')
+        with support.check_py3k_warnings():
+            t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
+                                   encoding='quopri_codec')
         with self.maybeRaises(TypeError):
             t.readline()
-        t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
-                               encoding='quopri_codec')
+        with support.check_py3k_warnings():
+            t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
+                                   encoding='quopri_codec')
         with self.maybeRaises(TypeError):
             t.read()
+        #else:
+            #t = _make_illegal_wrapper()
+            #self.assertRaises(TypeError, t.read, 1)
+            #t = _make_illegal_wrapper()
+            #self.assertRaises(TypeError, t.readline)
+            #t = _make_illegal_wrapper()
+            #self.assertRaises(TypeError, t.read)
 
 
 class CTextIOWrapperTest(TextIOWrapperTest):
@@ -2658,7 +2687,7 @@
         wr = weakref.ref(t)
         del t
         support.gc_collect()
-        self.assertTrue(wr() is None, wr)
+        self.assertIsNone(wr(), wr)
         with self.open(support.TESTFN, "rb") as f:
             self.assertEqual(f.read(), b"456def")
 
@@ -2808,7 +2837,7 @@
     def test___all__(self):
         for name in self.io.__all__:
             obj = getattr(self.io, name, None)
-            self.assertTrue(obj is not None, name)
+            self.assertIsNotNone(obj, name)
             if name == "open":
                 continue
             elif "error" in name.lower() or name == "UnsupportedOperation":
@@ -2903,7 +2932,7 @@
         wr = weakref.ref(c)
         del c, b
         support.gc_collect()
-        self.assertTrue(wr() is None, wr)
+        self.assertIsNone(wr(), wr)
 
     def test_abcs(self):
         # Test the visible base classes are ABCs.
@@ -2996,15 +3025,17 @@
             received += iter(rf.read, None)
 
         sent, received = b''.join(sent), b''.join(received)
-        self.assertTrue(sent == received)
+        self.assertEqual(sent, received)
         self.assertTrue(wf.closed)
         self.assertTrue(rf.closed)
 
 class CMiscIOTest(MiscIOTest):
     io = io
+    shutdown_error = "RuntimeError: could not find io module state"
 
 class PyMiscIOTest(MiscIOTest):
     io = pyio
+    shutdown_error = "LookupError: unknown encoding: ascii"
 
 
 @unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
diff --git a/lib/python2.7/test/test_itertools.py b/lib/python2.7/test/test_itertools.py
index 753aa17..8b5f051 100644
--- a/lib/python2.7/test/test_itertools.py
+++ b/lib/python2.7/test/test_itertools.py
@@ -1525,6 +1525,11 @@
 ...     "Returns the nth item or a default value"
 ...     return next(islice(iterable, n, None), default)
 
+>>> def all_equal(iterable):
+...     "Returns True if all the elements are equal to each other"
+...     g = groupby(iterable)
+...     return next(g, True) and not next(g, False)
+
 >>> def quantify(iterable, pred=bool):
 ...     "Count how many times the predicate is true"
 ...     return sum(imap(pred, iterable))
@@ -1623,6 +1628,9 @@
 >>> nth('abcde', 9) is None
 True
 
+>>> [all_equal(s) for s in ('', 'A', 'AAAA', 'AAAB', 'AAABA')]
+[True, True, True, False, False]
+
 >>> quantify(xrange(99), lambda x: x%2==0)
 50
 
diff --git a/lib/python2.7/test/test_locale.py b/lib/python2.7/test/test_locale.py
index 719175b..563ddb1 100644
--- a/lib/python2.7/test/test_locale.py
+++ b/lib/python2.7/test/test_locale.py
@@ -493,6 +493,16 @@
         # longer accept unicode strings.
         self.assertEqual(locale.normalize(u'en_US'), 'en_US.ISO8859-1')
 
+    def test_setlocale_unicode(self):
+        oldlocale = locale.getlocale()
+        self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
+
+        user_locale = locale.setlocale(locale.LC_CTYPE, '')
+        unicode_locale = user_locale.decode('utf-8')
+
+        user_locale2 = locale.setlocale(locale.LC_CTYPE, unicode_locale)
+        self.assertEqual(user_locale, user_locale2)
+
 
 def test_main():
     tests = [
diff --git a/lib/python2.7/test/test_long.py b/lib/python2.7/test/test_long.py
index 8d16bb0..b65d24c 100644
--- a/lib/python2.7/test/test_long.py
+++ b/lib/python2.7/test/test_long.py
@@ -79,6 +79,12 @@
         (unichr(0x200), ValueError),
 ]
 
+class LongSubclass(long):
+    pass
+
+class OtherLongSubclass(long):
+    pass
+
 class LongTest(test_int.IntLongCommonTests, unittest.TestCase):
 
     ntype = long
@@ -216,43 +222,43 @@
         for n in xrange(2*SHIFT):
             p2 = 2L ** n
             eq(x << n >> n, x,
-                Frm("x << n >> n != x for x=%r, n=%r", (x, n)))
+                Frm("x << n >> n != x for x=%r, n=%r", x, n))
             eq(x // p2, x >> n,
-                Frm("x // p2 != x >> n for x=%r n=%r p2=%r", (x, n, p2)))
+                Frm("x // p2 != x >> n for x=%r n=%r p2=%r", x, n, p2))
             eq(x * p2, x << n,
-                Frm("x * p2 != x << n for x=%r n=%r p2=%r", (x, n, p2)))
+                Frm("x * p2 != x << n for x=%r n=%r p2=%r", x, n, p2))
             eq(x & -p2, x >> n << n,
-                Frm("not x & -p2 == x >> n << n for x=%r n=%r p2=%r", (x, n, p2)))
+                Frm("not x & -p2 == x >> n << n for x=%r n=%r p2=%r", x, n, p2))
             eq(x & -p2, x & ~(p2 - 1),
-                Frm("not x & -p2 == x & ~(p2 - 1) for x=%r n=%r p2=%r", (x, n, p2)))
+                Frm("not x & -p2 == x & ~(p2 - 1) for x=%r n=%r p2=%r", x, n, p2))
 
     def check_bitop_identities_2(self, x, y):
         eq = self.assertEqual
-        eq(x & y, y & x, Frm("x & y != y & x for x=%r, y=%r", (x, y)))
-        eq(x | y, y | x, Frm("x | y != y | x for x=%r, y=%r", (x, y)))
-        eq(x ^ y, y ^ x, Frm("x ^ y != y ^ x for x=%r, y=%r", (x, y)))
-        eq(x ^ y ^ x, y, Frm("x ^ y ^ x != y for x=%r, y=%r", (x, y)))
-        eq(x & y, ~(~x | ~y), Frm("x & y != ~(~x | ~y) for x=%r, y=%r", (x, y)))
-        eq(x | y, ~(~x & ~y), Frm("x | y != ~(~x & ~y) for x=%r, y=%r", (x, y)))
+        eq(x & y, y & x, Frm("x & y != y & x for x=%r, y=%r", x, y))
+        eq(x | y, y | x, Frm("x | y != y | x for x=%r, y=%r", x, y))
+        eq(x ^ y, y ^ x, Frm("x ^ y != y ^ x for x=%r, y=%r", x, y))
+        eq(x ^ y ^ x, y, Frm("x ^ y ^ x != y for x=%r, y=%r", x, y))
+        eq(x & y, ~(~x | ~y), Frm("x & y != ~(~x | ~y) for x=%r, y=%r", x, y))
+        eq(x | y, ~(~x & ~y), Frm("x | y != ~(~x & ~y) for x=%r, y=%r", x, y))
         eq(x ^ y, (x | y) & ~(x & y),
-             Frm("x ^ y != (x | y) & ~(x & y) for x=%r, y=%r", (x, y)))
+             Frm("x ^ y != (x | y) & ~(x & y) for x=%r, y=%r", x, y))
         eq(x ^ y, (x & ~y) | (~x & y),
-             Frm("x ^ y == (x & ~y) | (~x & y) for x=%r, y=%r", (x, y)))
+             Frm("x ^ y == (x & ~y) | (~x & y) for x=%r, y=%r", x, y))
         eq(x ^ y, (x | y) & (~x | ~y),
-             Frm("x ^ y == (x | y) & (~x | ~y) for x=%r, y=%r", (x, y)))
+             Frm("x ^ y == (x | y) & (~x | ~y) for x=%r, y=%r", x, y))
 
     def check_bitop_identities_3(self, x, y, z):
         eq = self.assertEqual
         eq((x & y) & z, x & (y & z),
-             Frm("(x & y) & z != x & (y & z) for x=%r, y=%r, z=%r", (x, y, z)))
+             Frm("(x & y) & z != x & (y & z) for x=%r, y=%r, z=%r", x, y, z))
         eq((x | y) | z, x | (y | z),
-             Frm("(x | y) | z != x | (y | z) for x=%r, y=%r, z=%r", (x, y, z)))
+             Frm("(x | y) | z != x | (y | z) for x=%r, y=%r, z=%r", x, y, z))
         eq((x ^ y) ^ z, x ^ (y ^ z),
-             Frm("(x ^ y) ^ z != x ^ (y ^ z) for x=%r, y=%r, z=%r", (x, y, z)))
+             Frm("(x ^ y) ^ z != x ^ (y ^ z) for x=%r, y=%r, z=%r", x, y, z))
         eq(x & (y | z), (x & y) | (x & z),
-             Frm("x & (y | z) != (x & y) | (x & z) for x=%r, y=%r, z=%r", (x, y, z)))
+             Frm("x & (y | z) != (x & y) | (x & z) for x=%r, y=%r, z=%r", x, y, z))
         eq(x | (y & z), (x | y) & (x | z),
-             Frm("x | (y & z) != (x | y) & (x | z) for x=%r, y=%r, z=%r", (x, y, z)))
+             Frm("x | (y & z) != (x | y) & (x | z) for x=%r, y=%r, z=%r", x, y, z))
 
     def test_bitop_identities(self):
         for x in special:
@@ -539,6 +545,17 @@
                     self.fail("Failed to raise TypeError with %s" %
                               ((base, trunc_result_base),))
 
+                class TruncReturnsLongSubclass(base):
+                    def __long__(self):
+                        return OtherLongSubclass(42L)
+                good_int = TruncReturnsLongSubclass()
+                n = long(good_int)
+                self.assertEqual(n, 42L)
+                self.assertIs(type(n), OtherLongSubclass)
+                n = LongSubclass(good_int)
+                self.assertEqual(n, 42L)
+                self.assertIs(type(n), LongSubclass)
+
     def test_misc(self):
 
         # check the extremes in int<->long conversion
diff --git a/lib/python2.7/test/test_memoryio.py b/lib/python2.7/test/test_memoryio.py
index fdd642b..0eb9961 100644
--- a/lib/python2.7/test/test_memoryio.py
+++ b/lib/python2.7/test/test_memoryio.py
@@ -673,7 +673,8 @@
         self.assertEqual(len(state), 3)
         bytearray(state[0]) # Check if state[0] supports the buffer interface.
         self.assertIsInstance(state[1], int)
-        self.assertTrue(isinstance(state[2], dict) or state[2] is None)
+        if state[2] is not None:
+            self.assertIsInstance(state[2], dict)
         memio.close()
         self.assertRaises(ValueError, memio.__getstate__)
 
@@ -729,7 +730,8 @@
         self.assertIsInstance(state[0], unicode)
         self.assertIsInstance(state[1], str)
         self.assertIsInstance(state[2], int)
-        self.assertTrue(isinstance(state[3], dict) or state[3] is None)
+        if state[3] is not None:
+            self.assertIsInstance(state[3], dict)
         memio.close()
         self.assertRaises(ValueError, memio.__getstate__)
 
diff --git a/lib/python2.7/test/test_memoryview.py b/lib/python2.7/test/test_memoryview.py
index f14bafd..4407af8 100644
--- a/lib/python2.7/test/test_memoryview.py
+++ b/lib/python2.7/test/test_memoryview.py
@@ -10,6 +10,8 @@
 import array
 from test import test_support
 import io
+import copy
+import pickle
 
 
 class AbstractMemoryTests:
@@ -354,6 +356,20 @@
     #pass
 
 
+class OtherTest(unittest.TestCase):
+    def test_copy(self):
+        m = memoryview(b'abc')
+        with self.assertRaises(TypeError):
+            copy.copy(m)
+
+    # See issue #22995
+    ## def test_pickle(self):
+    ##     m = memoryview(b'abc')
+    ##     for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+    ##         with self.assertRaises(TypeError):
+    ##             pickle.dumps(m, proto)
+
+
 def test_main():
     test_support.run_unittest(__name__)
 
diff --git a/lib/python2.7/test/test_minidom.py b/lib/python2.7/test/test_minidom.py
index a962ddc..b6d88d2 100644
--- a/lib/python2.7/test/test_minidom.py
+++ b/lib/python2.7/test/test_minidom.py
@@ -1,5 +1,6 @@
 # test for xml.dom.minidom
 
+import copy
 import pickle
 from StringIO import StringIO
 from test.test_support import verbose, run_unittest, findfile
@@ -14,7 +15,13 @@
 
 
 tstfile = findfile("test.xml", subdir="xmltestdata")
-
+sample = ("<?xml version='1.0' encoding='us-ascii'?>\n"
+          "<!DOCTYPE doc PUBLIC 'http://xml.python.org/public'"
+          " 'http://xml.python.org/system' [\n"
+          "  <!ELEMENT e EMPTY>\n"
+          "  <!ENTITY ent SYSTEM 'http://xml.python.org/entity'>\n"
+          "]><doc attr='value'> text\n"
+          "<?pi sample?> <!-- comment --> <e/> </doc>")
 
 # The tests of DocumentType importing use these helpers to construct
 # the documents to work with, since not all DOM builders actually
@@ -1377,52 +1384,54 @@
         self.confirm(e.isSameNode(doc.getElementById("w"))
                 and a2.isId)
 
+    def assert_recursive_equal(self, doc, doc2):
+        stack = [(doc, doc2)]
+        while stack:
+            n1, n2 = stack.pop()
+            self.assertEqual(n1.nodeType, n2.nodeType)
+            self.assertEqual(len(n1.childNodes), len(n2.childNodes))
+            self.assertEqual(n1.nodeName, n2.nodeName)
+            self.assertFalse(n1.isSameNode(n2))
+            self.assertFalse(n2.isSameNode(n1))
+            if n1.nodeType == Node.DOCUMENT_TYPE_NODE:
+                len(n1.entities)
+                len(n2.entities)
+                len(n1.notations)
+                len(n2.notations)
+                self.assertEqual(len(n1.entities), len(n2.entities))
+                self.assertEqual(len(n1.notations), len(n2.notations))
+                for i in range(len(n1.notations)):
+                    # XXX this loop body doesn't seem to be executed?
+                    no1 = n1.notations.item(i)
+                    no2 = n1.notations.item(i)
+                    self.assertEqual(no1.name, no2.name)
+                    self.assertEqual(no1.publicId, no2.publicId)
+                    self.assertEqual(no1.systemId, no2.systemId)
+                    stack.append((no1, no2))
+                for i in range(len(n1.entities)):
+                    e1 = n1.entities.item(i)
+                    e2 = n2.entities.item(i)
+                    self.assertEqual(e1.notationName, e2.notationName)
+                    self.assertEqual(e1.publicId, e2.publicId)
+                    self.assertEqual(e1.systemId, e2.systemId)
+                    stack.append((e1, e2))
+            if n1.nodeType != Node.DOCUMENT_NODE:
+                self.assertTrue(n1.ownerDocument.isSameNode(doc))
+                self.assertTrue(n2.ownerDocument.isSameNode(doc2))
+            for i in range(len(n1.childNodes)):
+                stack.append((n1.childNodes[i], n2.childNodes[i]))
+
     def testPickledDocument(self):
-        doc = parseString("<?xml version='1.0' encoding='us-ascii'?>\n"
-                    "<!DOCTYPE doc PUBLIC 'http://xml.python.org/public'"
-                    " 'http://xml.python.org/system' [\n"
-                    "  <!ELEMENT e EMPTY>\n"
-                    "  <!ENTITY ent SYSTEM 'http://xml.python.org/entity'>\n"
-                    "]><doc attr='value'> text\n"
-                    "<?pi sample?> <!-- comment --> <e/> </doc>")
-        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+        doc = parseString(sample)
+        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
             s = pickle.dumps(doc, proto)
             doc2 = pickle.loads(s)
-            stack = [(doc, doc2)]
-            while stack:
-                n1, n2 = stack.pop()
-                self.confirm(n1.nodeType == n2.nodeType
-                        and len(n1.childNodes) == len(n2.childNodes)
-                        and n1.nodeName == n2.nodeName
-                        and not n1.isSameNode(n2)
-                        and not n2.isSameNode(n1))
-                if n1.nodeType == Node.DOCUMENT_TYPE_NODE:
-                    len(n1.entities)
-                    len(n2.entities)
-                    len(n1.notations)
-                    len(n2.notations)
-                    self.confirm(len(n1.entities) == len(n2.entities)
-                            and len(n1.notations) == len(n2.notations))
-                    for i in range(len(n1.notations)):
-                        # XXX this loop body doesn't seem to be executed?
-                        no1 = n1.notations.item(i)
-                        no2 = n1.notations.item(i)
-                        self.confirm(no1.name == no2.name
-                                and no1.publicId == no2.publicId
-                                and no1.systemId == no2.systemId)
-                        stack.append((no1, no2))
-                    for i in range(len(n1.entities)):
-                        e1 = n1.entities.item(i)
-                        e2 = n2.entities.item(i)
-                        self.confirm(e1.notationName == e2.notationName
-                                and e1.publicId == e2.publicId
-                                and e1.systemId == e2.systemId)
-                        stack.append((e1, e2))
-                if n1.nodeType != Node.DOCUMENT_NODE:
-                    self.confirm(n1.ownerDocument.isSameNode(doc)
-                            and n2.ownerDocument.isSameNode(doc2))
-                for i in range(len(n1.childNodes)):
-                    stack.append((n1.childNodes[i], n2.childNodes[i]))
+            self.assert_recursive_equal(doc, doc2)
+
+    def testDeepcopiedDocument(self):
+        doc = parseString(sample)
+        doc2 = copy.deepcopy(doc)
+        self.assert_recursive_equal(doc, doc2)
 
     def testSerializeCommentNodeWithDoubleHyphen(self):
         doc = create_doc_without_doctype()
diff --git a/lib/python2.7/test/test_ordered_dict.py b/lib/python2.7/test/test_ordered_dict.py
new file mode 100644
index 0000000..78b3e83
--- /dev/null
+++ b/lib/python2.7/test/test_ordered_dict.py
@@ -0,0 +1,293 @@
+import copy
+import pickle
+from random import shuffle
+import unittest
+from collections import OrderedDict
+from collections import MutableMapping
+from test import mapping_tests, test_support
+
+
+class TestOrderedDict(unittest.TestCase):
+
+    def test_init(self):
+        with self.assertRaises(TypeError):
+            OrderedDict([('a', 1), ('b', 2)], None)                                 # too many args
+        pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
+        self.assertEqual(sorted(OrderedDict(dict(pairs)).items()), pairs)           # dict input
+        self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()), pairs)         # kwds input
+        self.assertEqual(list(OrderedDict(pairs).items()), pairs)                   # pairs input
+        self.assertEqual(list(OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)],
+                                          c=3, e=5).items()), pairs)                # mixed input
+
+        # make sure no positional args conflict with possible kwdargs
+        self.assertEqual(list(OrderedDict(self=42).items()), [('self', 42)])
+        self.assertEqual(list(OrderedDict(other=42).items()), [('other', 42)])
+        self.assertRaises(TypeError, OrderedDict, 42)
+        self.assertRaises(TypeError, OrderedDict, (), ())
+        self.assertRaises(TypeError, OrderedDict.__init__)
+
+        # Make sure that direct calls to __init__ do not clear previous contents
+        d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
+        d.__init__([('e', 5), ('f', 6)], g=7, d=4)
+        self.assertEqual(list(d.items()),
+            [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
+
+    def test_update(self):
+        with self.assertRaises(TypeError):
+            OrderedDict().update([('a', 1), ('b', 2)], None)                        # too many args
+        pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
+        od = OrderedDict()
+        od.update(dict(pairs))
+        self.assertEqual(sorted(od.items()), pairs)                                 # dict input
+        od = OrderedDict()
+        od.update(**dict(pairs))
+        self.assertEqual(sorted(od.items()), pairs)                                 # kwds input
+        od = OrderedDict()
+        od.update(pairs)
+        self.assertEqual(list(od.items()), pairs)                                   # pairs input
+        od = OrderedDict()
+        od.update([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5)
+        self.assertEqual(list(od.items()), pairs)                                   # mixed input
+
+        # Issue 9137: Named argument called 'other' or 'self'
+        # shouldn't be treated specially.
+        od = OrderedDict()
+        od.update(self=23)
+        self.assertEqual(list(od.items()), [('self', 23)])
+        od = OrderedDict()
+        od.update(other={})
+        self.assertEqual(list(od.items()), [('other', {})])
+        od = OrderedDict()
+        od.update(red=5, blue=6, other=7, self=8)
+        self.assertEqual(sorted(list(od.items())),
+                         [('blue', 6), ('other', 7), ('red', 5), ('self', 8)])
+
+        # Make sure that direct calls to update do not clear previous contents
+        # and that updated items are not moved to the end
+        d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
+        d.update([('e', 5), ('f', 6)], g=7, d=4)
+        self.assertEqual(list(d.items()),
+            [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
+
+        self.assertRaises(TypeError, OrderedDict().update, 42)
+        self.assertRaises(TypeError, OrderedDict().update, (), ())
+        self.assertRaises(TypeError, OrderedDict.update)
+
+    def test_abc(self):
+        self.assertIsInstance(OrderedDict(), MutableMapping)
+        self.assertTrue(issubclass(OrderedDict, MutableMapping))
+
+    def test_clear(self):
+        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
+        shuffle(pairs)
+        od = OrderedDict(pairs)
+        self.assertEqual(len(od), len(pairs))
+        od.clear()
+        self.assertEqual(len(od), 0)
+
+    def test_delitem(self):
+        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
+        od = OrderedDict(pairs)
+        del od['a']
+        self.assertNotIn('a', od)
+        with self.assertRaises(KeyError):
+            del od['a']
+        self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])
+
+    def test_setitem(self):
+        od = OrderedDict([('d', 1), ('b', 2), ('c', 3), ('a', 4), ('e', 5)])
+        od['c'] = 10           # existing element
+        od['f'] = 20           # new element
+        self.assertEqual(list(od.items()),
+                         [('d', 1), ('b', 2), ('c', 10), ('a', 4), ('e', 5), ('f', 20)])
+
+    def test_iterators(self):
+        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
+        shuffle(pairs)
+        od = OrderedDict(pairs)
+        self.assertEqual(list(od), [t[0] for t in pairs])
+        self.assertEqual(od.keys()[:], [t[0] for t in pairs])
+        self.assertEqual(od.values()[:], [t[1] for t in pairs])
+        self.assertEqual(od.items()[:], pairs)
+        self.assertEqual(list(od.iterkeys()), [t[0] for t in pairs])
+        self.assertEqual(list(od.itervalues()), [t[1] for t in pairs])
+        self.assertEqual(list(od.iteritems()), pairs)
+        self.assertEqual(list(reversed(od)),
+                         [t[0] for t in reversed(pairs)])
+
+    def test_popitem(self):
+        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
+        shuffle(pairs)
+        od = OrderedDict(pairs)
+        while pairs:
+            self.assertEqual(od.popitem(), pairs.pop())
+        with self.assertRaises(KeyError):
+            od.popitem()
+        self.assertEqual(len(od), 0)
+
+    def test_pop(self):
+        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
+        shuffle(pairs)
+        od = OrderedDict(pairs)
+        shuffle(pairs)
+        while pairs:
+            k, v = pairs.pop()
+            self.assertEqual(od.pop(k), v)
+        with self.assertRaises(KeyError):
+            od.pop('xyz')
+        self.assertEqual(len(od), 0)
+        self.assertEqual(od.pop(k, 12345), 12345)
+
+        # make sure pop still works when __missing__ is defined
+        class Missing(OrderedDict):
+            def __missing__(self, key):
+                return 0
+        m = Missing(a=1)
+        self.assertEqual(m.pop('b', 5), 5)
+        self.assertEqual(m.pop('a', 6), 1)
+        self.assertEqual(m.pop('a', 6), 6)
+        with self.assertRaises(KeyError):
+            m.pop('a')
+
+    def test_equality(self):
+        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
+        shuffle(pairs)
+        od1 = OrderedDict(pairs)
+        od2 = OrderedDict(pairs)
+        self.assertEqual(od1, od2)          # same order implies equality
+        pairs = pairs[2:] + pairs[:2]
+        od2 = OrderedDict(pairs)
+        self.assertNotEqual(od1, od2)       # different order implies inequality
+        # comparison to regular dict is not order sensitive
+        self.assertEqual(od1, dict(od2))
+        self.assertEqual(dict(od2), od1)
+        # different length implies inequality
+        self.assertNotEqual(od1, OrderedDict(pairs[:-1]))
+
+    def test_copying(self):
+        # Check that ordered dicts are copyable, deepcopyable, picklable,
+        # and have a repr/eval round-trip
+        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
+        od = OrderedDict(pairs)
+        update_test = OrderedDict()
+        update_test.update(od)
+        for i, dup in enumerate([
+                    od.copy(),
+                    copy.copy(od),
+                    copy.deepcopy(od),
+                    pickle.loads(pickle.dumps(od, 0)),
+                    pickle.loads(pickle.dumps(od, 1)),
+                    pickle.loads(pickle.dumps(od, 2)),
+                    pickle.loads(pickle.dumps(od, -1)),
+                    eval(repr(od)),
+                    update_test,
+                    OrderedDict(od),
+                    ]):
+            self.assertTrue(dup is not od)
+            self.assertEqual(dup, od)
+            self.assertEqual(list(dup.items()), list(od.items()))
+            self.assertEqual(len(dup), len(od))
+            self.assertEqual(type(dup), type(od))
+
+    def test_yaml_linkage(self):
+        # Verify that __reduce__ is set up in a way that supports PyYAML's dump() feature.
+        # In yaml, lists are native but tuples are not.
+        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
+        od = OrderedDict(pairs)
+        # yaml.dump(od) -->
+        # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n  - [b, 2]\n'
+        self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1]))
+
+    def test_reduce_not_too_fat(self):
+        # do not save instance dictionary if not needed
+        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
+        od = OrderedDict(pairs)
+        self.assertEqual(len(od.__reduce__()), 2)
+        od.x = 10
+        self.assertEqual(len(od.__reduce__()), 3)
+
+    def test_repr(self):
+        od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])
+        self.assertEqual(repr(od),
+            "OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])")
+        self.assertEqual(eval(repr(od)), od)
+        self.assertEqual(repr(OrderedDict()), "OrderedDict()")
+
+    def test_repr_recursive(self):
+        # See issue #9826
+        od = OrderedDict.fromkeys('abc')
+        od['x'] = od
+        self.assertEqual(repr(od),
+            "OrderedDict([('a', None), ('b', None), ('c', None), ('x', ...)])")
+
+    def test_setdefault(self):
+        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
+        shuffle(pairs)
+        od = OrderedDict(pairs)
+        pair_order = list(od.items())
+        self.assertEqual(od.setdefault('a', 10), 3)
+        # make sure order didn't change
+        self.assertEqual(list(od.items()), pair_order)
+        self.assertEqual(od.setdefault('x', 10), 10)
+        # make sure 'x' is added to the end
+        self.assertEqual(list(od.items())[-1], ('x', 10))
+
+        # make sure setdefault still works when __missing__ is defined
+        class Missing(OrderedDict):
+            def __missing__(self, key):
+                return 0
+        self.assertEqual(Missing().setdefault(5, 9), 9)
+
+    def test_reinsert(self):
+        # Given insert a, insert b, delete a, re-insert a,
+        # verify that a is now later than b.
+        od = OrderedDict()
+        od['a'] = 1
+        od['b'] = 2
+        del od['a']
+        od['a'] = 1
+        self.assertEqual(list(od.items()), [('b', 2), ('a', 1)])
+
+    def test_views(self):
+        s = 'the quick brown fox jumped over a lazy dog yesterday before dawn'.split()
+        od = OrderedDict.fromkeys(s)
+        self.assertEqual(list(od.viewkeys()),  s)
+        self.assertEqual(list(od.viewvalues()),  [None for k in s])
+        self.assertEqual(list(od.viewitems()),  [(k, None) for k in s])
+
+        # See http://bugs.python.org/issue24286
+        self.assertEqual(od.viewkeys(), dict(od).viewkeys())
+        self.assertEqual(od.viewitems(), dict(od).viewitems())
+
+    def test_override_update(self):
+        # Verify that subclasses can override update() without breaking __init__()
+        class MyOD(OrderedDict):
+            def update(self, *args, **kwds):
+                raise Exception()
+        items = [('a', 1), ('c', 3), ('b', 2)]
+        self.assertEqual(list(MyOD(items).items()), items)
+
+class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
+    type2test = OrderedDict
+
+    def test_popitem(self):
+        d = self._empty_mapping()
+        self.assertRaises(KeyError, d.popitem)
+
+class MyOrderedDict(OrderedDict):
+    pass
+
+class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
+    type2test = MyOrderedDict
+
+    def test_popitem(self):
+        d = self._empty_mapping()
+        self.assertRaises(KeyError, d.popitem)
+
+
+def test_main(verbose=None):
+    test_classes = [TestOrderedDict, GeneralMappingTests, SubclassMappingTests]
+    test_support.run_unittest(*test_classes)
+
+if __name__ == "__main__":
+    test_main(verbose=True)
diff --git a/lib/python2.7/test/test_os.py b/lib/python2.7/test/test_os.py
index 6c7ea7a..57c7c88 100644
--- a/lib/python2.7/test/test_os.py
+++ b/lib/python2.7/test/test_os.py
@@ -856,7 +856,7 @@
             os.kill(proc.pid, signal.SIGINT)
             self.fail("subprocess did not stop on {}".format(name))
 
-    @unittest.skip("subprocesses aren't inheriting CTRL+C property")
+    @unittest.skip("subprocesses aren't inheriting Ctrl+C property")
     def test_CTRL_C_EVENT(self):
         from ctypes import wintypes
         import ctypes
@@ -869,7 +869,7 @@
         SetConsoleCtrlHandler.restype = wintypes.BOOL
 
         # Calling this with NULL and FALSE causes the calling process to
-        # handle CTRL+C, rather than ignore it. This property is inherited
+        # handle Ctrl+C, rather than ignore it. This property is inherited
         # by subprocesses.
         SetConsoleCtrlHandler(NULL, 0)
 
diff --git a/lib/python2.7/test/test_pdb.py b/lib/python2.7/test/test_pdb.py
index b6dd2b7..b98fe19 100644
--- a/lib/python2.7/test/test_pdb.py
+++ b/lib/python2.7/test/test_pdb.py
@@ -69,6 +69,17 @@
             any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
             'Fail to step into the caller after a return')
 
+    def test_issue16180(self):
+        # A syntax error in the debuggee.
+        script = "def f: pass\n"
+        commands = ''
+        expected = "SyntaxError:"
+        stdout, stderr = self.run_pdb(script, commands)
+        self.assertIn(expected, stdout,
+            '\n\nExpected:\n{}\nGot:\n{}\n'
+            'Fail to handle a syntax error in the debuggee.'
+            .format(expected, stdout))
+
 
 class PdbTestInput(object):
     """Context manager that makes testing Pdb in doctests easier."""
diff --git a/lib/python2.7/test/test_pep277.py b/lib/python2.7/test/test_pep277.py
index 92b82d0..cbc36cf 100644
--- a/lib/python2.7/test/test_pep277.py
+++ b/lib/python2.7/test/test_pep277.py
@@ -164,17 +164,11 @@
         dirname = os.path.join(test_support.TESTFN,
                                u'Gr\xfc\xdf-\u66e8\u66e9\u66eb')
         filename = u'\xdf-\u66e8\u66e9\u66eb'
-        oldwd = os.getcwd()
-        os.mkdir(dirname)
-        os.chdir(dirname)
-        try:
+        with test_support.temp_cwd(dirname):
             with open(filename, 'w') as f:
                 f.write((filename + '\n').encode("utf-8"))
             os.access(filename,os.R_OK)
             os.remove(filename)
-        finally:
-            os.chdir(oldwd)
-            os.rmdir(dirname)
 
 
 class UnicodeNFCFileTests(UnicodeFileTests):
diff --git a/lib/python2.7/test/test_pickle.py b/lib/python2.7/test/test_pickle.py
index c312649..bb43656 100644
--- a/lib/python2.7/test/test_pickle.py
+++ b/lib/python2.7/test/test_pickle.py
@@ -1,15 +1,18 @@
 import pickle
+import struct
 from cStringIO import StringIO
 
 from test import test_support
 
-from test.pickletester import (AbstractPickleTests,
+from test.pickletester import (AbstractUnpickleTests,
+                               AbstractPickleTests,
                                AbstractPickleModuleTests,
                                AbstractPersistentPicklerTests,
                                AbstractPicklerUnpicklerObjectTests,
                                BigmemPickleTests)
 
-class PickleTests(AbstractPickleTests, AbstractPickleModuleTests):
+class PickleTests(AbstractUnpickleTests, AbstractPickleTests,
+                  AbstractPickleModuleTests):
 
     def dumps(self, arg, proto=0, fast=0):
         # Ignore fast
@@ -21,10 +24,31 @@
 
     module = pickle
     error = KeyError
+    bad_stack_errors = (IndexError,)
+    bad_mark_errors = (IndexError, pickle.UnpicklingError,
+                       TypeError, AttributeError, EOFError)
+    truncated_errors = (pickle.UnpicklingError, EOFError,
+                        AttributeError, ValueError,
+                        struct.error, IndexError, ImportError,
+                        TypeError, KeyError)
 
-class PicklerTests(AbstractPickleTests):
+class UnpicklerTests(AbstractUnpickleTests):
 
     error = KeyError
+    bad_stack_errors = (IndexError,)
+    bad_mark_errors = (IndexError, pickle.UnpicklingError,
+                       TypeError, AttributeError, EOFError)
+    truncated_errors = (pickle.UnpicklingError, EOFError,
+                        AttributeError, ValueError,
+                        struct.error, IndexError, ImportError,
+                        TypeError, KeyError)
+
+    def loads(self, buf):
+        f = StringIO(buf)
+        u = pickle.Unpickler(f)
+        return u.load()
+
+class PicklerTests(AbstractPickleTests):
 
     def dumps(self, arg, proto=0, fast=0):
         f = StringIO()
@@ -81,6 +105,7 @@
 def test_main():
     test_support.run_unittest(
         PickleTests,
+        UnpicklerTests,
         PicklerTests,
         PersPicklerTests,
         PicklerUnpicklerObjectTests,
diff --git a/lib/python2.7/test/test_posixpath.py b/lib/python2.7/test/test_posixpath.py
index 13381e5..686b6b9 100644
--- a/lib/python2.7/test/test_posixpath.py
+++ b/lib/python2.7/test/test_posixpath.py
@@ -1,5 +1,6 @@
 import unittest
 from test import test_support, test_genericpath
+from test import test_support as support
 
 import posixpath
 import os
@@ -251,7 +252,6 @@
             # Bug #930024, return the path unchanged if we get into an infinite
             # symlink loop.
             try:
-                old_path = abspath('.')
                 os.symlink(ABSTFN, ABSTFN)
                 self.assertEqual(realpath(ABSTFN), ABSTFN)
 
@@ -277,10 +277,9 @@
                 self.assertEqual(realpath(ABSTFN+"c"), ABSTFN+"c")
 
                 # Test using relative path as well.
-                os.chdir(dirname(ABSTFN))
-                self.assertEqual(realpath(basename(ABSTFN)), ABSTFN)
+                with support.change_cwd(dirname(ABSTFN)):
+                    self.assertEqual(realpath(basename(ABSTFN)), ABSTFN)
             finally:
-                os.chdir(old_path)
                 test_support.unlink(ABSTFN)
                 test_support.unlink(ABSTFN+"1")
                 test_support.unlink(ABSTFN+"2")
@@ -302,7 +301,6 @@
 
         def test_realpath_deep_recursion(self):
             depth = 10
-            old_path = abspath('.')
             try:
                 os.mkdir(ABSTFN)
                 for i in range(depth):
@@ -311,10 +309,9 @@
                 self.assertEqual(realpath(ABSTFN + '/%d' % depth), ABSTFN)
 
                 # Test using relative path as well.
-                os.chdir(ABSTFN)
-                self.assertEqual(realpath('%d' % depth), ABSTFN)
+                with support.change_cwd(ABSTFN):
+                    self.assertEqual(realpath('%d' % depth), ABSTFN)
             finally:
-                os.chdir(old_path)
                 for i in range(depth + 1):
                     test_support.unlink(ABSTFN + '/%d' % i)
                 safe_rmdir(ABSTFN)
@@ -325,15 +322,13 @@
             # /usr/doc with 'doc' being a symlink to /usr/share/doc. We call
             # realpath("a"). This should return /usr/share/doc/a/.
             try:
-                old_path = abspath('.')
                 os.mkdir(ABSTFN)
                 os.mkdir(ABSTFN + "/y")
                 os.symlink(ABSTFN + "/y", ABSTFN + "/k")
 
-                os.chdir(ABSTFN + "/k")
-                self.assertEqual(realpath("a"), ABSTFN + "/y/a")
+                with support.change_cwd(ABSTFN + "/k"):
+                    self.assertEqual(realpath("a"), ABSTFN + "/y/a")
             finally:
-                os.chdir(old_path)
                 test_support.unlink(ABSTFN + "/k")
                 safe_rmdir(ABSTFN + "/y")
                 safe_rmdir(ABSTFN)
@@ -347,7 +342,6 @@
             # and a symbolic link 'link-y' pointing to 'y' in directory 'a',
             # then realpath("link-y/..") should return 'k', not 'a'.
             try:
-                old_path = abspath('.')
                 os.mkdir(ABSTFN)
                 os.mkdir(ABSTFN + "/k")
                 os.mkdir(ABSTFN + "/k/y")
@@ -356,11 +350,10 @@
                 # Absolute path.
                 self.assertEqual(realpath(ABSTFN + "/link-y/.."), ABSTFN + "/k")
                 # Relative path.
-                os.chdir(dirname(ABSTFN))
-                self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."),
-                                 ABSTFN + "/k")
+                with support.change_cwd(dirname(ABSTFN)):
+                    self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."),
+                                     ABSTFN + "/k")
             finally:
-                os.chdir(old_path)
                 test_support.unlink(ABSTFN + "/link-y")
                 safe_rmdir(ABSTFN + "/k/y")
                 safe_rmdir(ABSTFN + "/k")
@@ -371,17 +364,14 @@
             # must be resolved too.
 
             try:
-                old_path = abspath('.')
                 os.mkdir(ABSTFN)
                 os.mkdir(ABSTFN + "/k")
                 os.symlink(ABSTFN, ABSTFN + "link")
-                os.chdir(dirname(ABSTFN))
-
-                base = basename(ABSTFN)
-                self.assertEqual(realpath(base + "link"), ABSTFN)
-                self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k")
+                with support.change_cwd(dirname(ABSTFN)):
+                    base = basename(ABSTFN)
+                    self.assertEqual(realpath(base + "link"), ABSTFN)
+                    self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k")
             finally:
-                os.chdir(old_path)
                 test_support.unlink(ABSTFN + "link")
                 safe_rmdir(ABSTFN + "/k")
                 safe_rmdir(ABSTFN)
diff --git a/lib/python2.7/test/test_pprint.py b/lib/python2.7/test/test_pprint.py
index 50493f6..bed9a93 100644
--- a/lib/python2.7/test/test_pprint.py
+++ b/lib/python2.7/test/test_pprint.py
@@ -56,6 +56,7 @@
         # Verify .isrecursive() and .isreadable() w/o recursion
         pp = pprint.PrettyPrinter()
         for safe in (2, 2.0, 2j, "abc", [3], (2,2), {3: 3}, uni("yaddayadda"),
+                     bytearray(b"ghi"), True, False, None,
                      self.a, self.b):
             # module-level convenience functions
             self.assertFalse(pprint.isrecursive(safe),
@@ -125,21 +126,23 @@
         # it sorted a dict display if and only if the display required
         # multiple lines.  For that reason, dicts with more than one element
         # aren't tested here.
-        for simple in (0, 0L, 0+0j, 0.0, "", uni(""),
+        for simple in (0, 0L, 0+0j, 0.0, "", uni(""), bytearray(),
                        (), tuple2(), tuple3(),
                        [], list2(), list3(),
                        set(), set2(), set3(),
                        frozenset(), frozenset2(), frozenset3(),
                        {}, dict2(), dict3(),
                        self.assertTrue, pprint,
-                       -6, -6L, -6-6j, -1.5, "x", uni("x"), (3,), [3], {3: 6},
+                       -6, -6L, -6-6j, -1.5, "x", uni("x"), bytearray(b"x"),
+                       (3,), [3], {3: 6},
                        (1,2), [3,4], {5: 6},
                        tuple2((1,2)), tuple3((1,2)), tuple3(range(100)),
                        [3,4], list2([3,4]), list3([3,4]), list3(range(100)),
                        set({7}), set2({7}), set3({7}),
                        frozenset({8}), frozenset2({8}), frozenset3({8}),
                        dict2({5: 6}), dict3({5: 6}),
-                       range(10, -11, -1)
+                       range(10, -11, -1),
+                       True, False, None,
                       ):
             native = repr(simple)
             self.assertEqual(pprint.pformat(simple), native)
diff --git a/lib/python2.7/test/test_py3kwarn.py b/lib/python2.7/test/test_py3kwarn.py
index 5aee6a5..b4e4e9d 100644
--- a/lib/python2.7/test/test_py3kwarn.py
+++ b/lib/python2.7/test/test_py3kwarn.py
@@ -2,6 +2,7 @@
 import sys
 from test.test_support import check_py3k_warnings, CleanImport, run_unittest
 import warnings
+from test import test_support
 
 if not sys.py3kwarning:
     raise unittest.SkipTest('%s must be run with the -3 flag' % __name__)
@@ -356,6 +357,21 @@
     def check_removal(self, module_name, optional=False):
         """Make sure the specified module, when imported, raises a
         DeprecationWarning and specifies itself in the message."""
+        if module_name in sys.modules:
+            mod = sys.modules[module_name]
+            filename = getattr(mod, '__file__', '')
+            mod = None
+            # the module is not implemented in C?
+            if not filename.endswith(('.py', '.pyc', '.pyo')):
+                # Issue #23375: If the module was already loaded, reimporting
+                # the module will not emit the warning again. The warning is
+                # emitted when the module is loaded, but C modules cannot be
+                # unloaded.
+                if test_support.verbose:
+                    print("Cannot test the Python 3 DeprecationWarning of the "
+                          "%s module, the C module is already loaded"
+                          % module_name)
+                return
         with CleanImport(module_name), warnings.catch_warnings():
             warnings.filterwarnings("error", ".+ (module|package) .+ removed",
                                     DeprecationWarning, __name__)
diff --git a/lib/python2.7/test/test_py_compile.py b/lib/python2.7/test/test_py_compile.py
index b919da2..5ec523a 100644
--- a/lib/python2.7/test/test_py_compile.py
+++ b/lib/python2.7/test/test_py_compile.py
@@ -5,7 +5,7 @@
 import tempfile
 import unittest
 
-from test import test_support
+from test import test_support as support
 
 class PyCompileTests(unittest.TestCase):
 
@@ -35,11 +35,9 @@
         self.assertTrue(os.path.exists(self.pyc_path))
 
     def test_cwd(self):
-        cwd = os.getcwd()
-        os.chdir(self.directory)
-        py_compile.compile(os.path.basename(self.source_path),
-                           os.path.basename(self.pyc_path))
-        os.chdir(cwd)
+        with support.change_cwd(self.directory):
+            py_compile.compile(os.path.basename(self.source_path),
+                               os.path.basename(self.pyc_path))
         self.assertTrue(os.path.exists(self.pyc_path))
 
     def test_relative_path(self):
@@ -48,7 +46,7 @@
         self.assertTrue(os.path.exists(self.pyc_path))
 
 def test_main():
-    test_support.run_unittest(PyCompileTests)
+    support.run_unittest(PyCompileTests)
 
 if __name__ == "__main__":
     test_main()
diff --git a/lib/python2.7/test/test_pyexpat.py b/lib/python2.7/test/test_pyexpat.py
index 9f63d4e..eba9058 100644
--- a/lib/python2.7/test/test_pyexpat.py
+++ b/lib/python2.7/test/test_pyexpat.py
@@ -13,27 +13,42 @@
 class SetAttributeTest(unittest.TestCase):
     def setUp(self):
         self.parser = expat.ParserCreate(namespace_separator='!')
-        self.set_get_pairs = [
-            [0, 0],
-            [1, 1],
-            [2, 1],
-            [0, 0],
-            ]
+
+    def test_buffer_text(self):
+        self.assertIs(self.parser.buffer_text, False)
+        for x in 0, 1, 2, 0:
+            self.parser.buffer_text = x
+            self.assertIs(self.parser.buffer_text, bool(x))
+
+    def test_namespace_prefixes(self):
+        self.assertIs(self.parser.namespace_prefixes, False)
+        for x in 0, 1, 2, 0:
+            self.parser.namespace_prefixes = x
+            self.assertIs(self.parser.namespace_prefixes, bool(x))
 
     def test_returns_unicode(self):
-        for x, y in self.set_get_pairs:
+        self.assertIs(self.parser.returns_unicode, test_support.have_unicode)
+        for x in 0, 1, 2, 0:
             self.parser.returns_unicode = x
-            self.assertEqual(self.parser.returns_unicode, y)
+            self.assertIs(self.parser.returns_unicode, bool(x))
 
     def test_ordered_attributes(self):
-        for x, y in self.set_get_pairs:
+        self.assertIs(self.parser.ordered_attributes, False)
+        for x in 0, 1, 2, 0:
             self.parser.ordered_attributes = x
-            self.assertEqual(self.parser.ordered_attributes, y)
+            self.assertIs(self.parser.ordered_attributes, bool(x))
 
     def test_specified_attributes(self):
-        for x, y in self.set_get_pairs:
+        self.assertIs(self.parser.specified_attributes, False)
+        for x in 0, 1, 2, 0:
             self.parser.specified_attributes = x
-            self.assertEqual(self.parser.specified_attributes, y)
+            self.assertIs(self.parser.specified_attributes, bool(x))
+
+    def test_invalid_attributes(self):
+        with self.assertRaises(AttributeError):
+            self.parser.foo = 1
+        with self.assertRaises(AttributeError):
+            self.parser.foo
 
 
 data = '''\
@@ -469,12 +484,14 @@
     def test_wrong_size(self):
         parser = expat.ParserCreate()
         parser.buffer_text = 1
-        def f(size):
-            parser.buffer_size = size
-
-        self.assertRaises(TypeError, f, sys.maxint+1)
-        self.assertRaises(ValueError, f, -1)
-        self.assertRaises(ValueError, f, 0)
+        with self.assertRaises(ValueError):
+            parser.buffer_size = -1
+        with self.assertRaises(ValueError):
+            parser.buffer_size = 0
+        with self.assertRaises(TypeError):
+            parser.buffer_size = 512.0
+        with self.assertRaises(TypeError):
+            parser.buffer_size = sys.maxint+1
 
     def test_unchanged_size(self):
         xml1 = ("<?xml version='1.0' encoding='iso8859'?><s>%s" % ('a' * 512))
diff --git a/lib/python2.7/test/test_random.py b/lib/python2.7/test/test_random.py
index 250f443..e4876fd 100644
--- a/lib/python2.7/test/test_random.py
+++ b/lib/python2.7/test/test_random.py
@@ -319,6 +319,11 @@
         self.assertRaises(TypeError, self.gen.setstate, (2, ('a',)*625, None))
         # Last element s/b an int also
         self.assertRaises(TypeError, self.gen.setstate, (2, (0,)*624+('a',), None))
+        # Last element s/b between 0 and 624
+        with self.assertRaises((ValueError, OverflowError)):
+            self.gen.setstate((2, (1,)*624+(625,), None))
+        with self.assertRaises((ValueError, OverflowError)):
+            self.gen.setstate((2, (1,)*624+(-1,), None))
 
     def test_referenceImplementation(self):
         # Compare the python implementation with results from the original
diff --git a/lib/python2.7/test/test_rlcompleter.py b/lib/python2.7/test/test_rlcompleter.py
index ac0e70d..99f0480 100644
--- a/lib/python2.7/test/test_rlcompleter.py
+++ b/lib/python2.7/test/test_rlcompleter.py
@@ -65,9 +65,43 @@
                          ['egg.{}('.format(x) for x in dir(str)
                           if x.startswith('s')])
 
+    def test_excessive_getattr(self):
+        # Ensure getattr() is invoked no more than once per attribute
+        class Foo:
+            calls = 0
+            @property
+            def bar(self):
+                self.calls += 1
+                return None
+        f = Foo()
+        completer = rlcompleter.Completer(dict(f=f))
+        self.assertEqual(completer.complete('f.b', 0), 'f.bar')
+        self.assertEqual(f.calls, 1)
+
+    def test_duplicate_globals(self):
+        namespace = {
+            'False': None,  # Keyword vs builtin vs namespace
+            'assert': None,  # Keyword vs namespace
+            'try': lambda: None,  # Keyword vs callable
+            'memoryview': None,  # Callable builtin vs non-callable
+            'Ellipsis': lambda: None,  # Non-callable builtin vs callable
+        }
+        completer = rlcompleter.Completer(namespace)
+        self.assertEqual(completer.complete('False', 0), 'False')
+        self.assertIsNone(completer.complete('False', 1))  # No duplicates
+        self.assertEqual(completer.complete('assert', 0), 'assert')
+        self.assertIsNone(completer.complete('assert', 1))
+        self.assertEqual(completer.complete('try', 0), 'try')
+        self.assertIsNone(completer.complete('try', 1))
+        # No opening bracket "(" because we overrode the built-in class
+        self.assertEqual(completer.complete('memoryview', 0), 'memoryview')
+        self.assertIsNone(completer.complete('memoryview', 1))
+        self.assertEqual(completer.complete('Ellipsis', 0), 'Ellipsis(')
+        self.assertIsNone(completer.complete('Ellipsis', 1))
+
 def test_main():
     support.run_unittest(TestRlcompleter)
 
 
 if __name__ == '__main__':
     test_main()
diff --git a/lib/python2.7/test/test_runpy.py b/lib/python2.7/test/test_runpy.py
index 76858d5..7f9fefa 100644
--- a/lib/python2.7/test/test_runpy.py
+++ b/lib/python2.7/test/test_runpy.py
@@ -270,6 +270,30 @@
             if verbose: print "Testing package depth:", depth
             self._check_package(depth)
 
+    def test_run_package_init_exceptions(self):
+        # These were previously wrapped in an ImportError; see Issue 14285
+        exceptions = (ImportError, AttributeError, TypeError, ValueError)
+        for exception in exceptions:
+            name = exception.__name__
+            source = "raise {0}('{0} in __init__.py.')".format(name)
+
+            result = self._make_pkg("", 1, "__main__")
+            pkg_dir, _, mod_name = result
+            mod_name = mod_name.replace(".__main__", "")
+            try:
+                init = os.path.join(pkg_dir, "__runpy_pkg__", "__init__.py")
+                with open(init, "wt") as mod_file:
+                    mod_file.write(source)
+                try:
+                    run_module(mod_name)
+                except exception as err:
+                    msg = "cannot be directly executed"
+                    self.assertNotIn(msg, format(err))
+                else:
+                    self.fail("Nothing raised; expected {}".format(name))
+            finally:
+                self._del_pkg(pkg_dir, 1, mod_name)
+
     def test_explicit_relative_import(self):
         for depth in range(2, 5):
             if verbose: print "Testing relative imports at depth:", depth
diff --git a/lib/python2.7/test/test_set.py b/lib/python2.7/test/test_set.py
index dd65202..d9ea098 100644
--- a/lib/python2.7/test/test_set.py
+++ b/lib/python2.7/test/test_set.py
@@ -1648,6 +1648,17 @@
         be_bad = True
         set1.symmetric_difference_update(dict2)
 
+    def test_iter_and_mutate(self):
+        # Issue #24581
+        s = set(range(100))
+        s.clear()
+        s.update(range(100))
+        si = iter(s)
+        s.clear()
+        a = list(range(100))
+        s.update(range(100))
+        list(si)
+
 # Application tests (based on David Eppstein's graph recipes ====================================
 
 def powerset(U):
diff --git a/lib/python2.7/test/test_shutil.py b/lib/python2.7/test/test_shutil.py
index bcabe75..c85f25e 100644
--- a/lib/python2.7/test/test_shutil.py
+++ b/lib/python2.7/test/test_shutil.py
@@ -8,15 +8,15 @@
 import os
 import os.path
 import errno
-from os.path import splitdrive
-from distutils.spawn import find_executable, spawn
-from shutil import (_make_tarball, _make_zipfile, make_archive,
+import subprocess
+from distutils.spawn import find_executable
+from shutil import (make_archive,
                     register_archive_format, unregister_archive_format,
                     get_archive_formats)
 import tarfile
 import warnings
 
-from test import test_support
+from test import test_support as support
 from test.test_support import TESTFN, check_warnings, captured_stdout
 
 TESTFN2 = TESTFN + "2"
@@ -374,139 +374,165 @@
     @unittest.skipUnless(zlib, "requires zlib")
     def test_make_tarball(self):
         # creating something to tar
-        tmpdir = self.mkdtemp()
-        self.write_file([tmpdir, 'file1'], 'xxx')
-        self.write_file([tmpdir, 'file2'], 'xxx')
-        os.mkdir(os.path.join(tmpdir, 'sub'))
-        self.write_file([tmpdir, 'sub', 'file3'], 'xxx')
+        root_dir, base_dir = self._create_files('')
 
         tmpdir2 = self.mkdtemp()
         # force shutil to create the directory
         os.rmdir(tmpdir2)
-        unittest.skipUnless(splitdrive(tmpdir)[0] == splitdrive(tmpdir2)[0],
-                            "source and target should be on same drive")
+        # working with relative paths
+        work_dir = os.path.dirname(tmpdir2)
+        rel_base_name = os.path.join(os.path.basename(tmpdir2), 'archive')
 
-        base_name = os.path.join(tmpdir2, 'archive')
-
-        # working with relative paths to avoid tar warnings
-        old_dir = os.getcwd()
-        os.chdir(tmpdir)
-        try:
-            _make_tarball(splitdrive(base_name)[1], '.')
-        finally:
-            os.chdir(old_dir)
+        with support.change_cwd(work_dir):
+            base_name = os.path.abspath(rel_base_name)
+            tarball = make_archive(rel_base_name, 'gztar', root_dir, '.')
 
         # check if the compressed tarball was created
-        tarball = base_name + '.tar.gz'
-        self.assertTrue(os.path.exists(tarball))
+        self.assertEqual(tarball, base_name + '.tar.gz')
+        self.assertTrue(os.path.isfile(tarball))
+        self.assertTrue(tarfile.is_tarfile(tarball))
+        with tarfile.open(tarball, 'r:gz') as tf:
+            self.assertEqual(sorted(tf.getnames()),
+                             ['.', './file1', './file2',
+                              './sub', './sub/file3', './sub2'])
 
         # trying an uncompressed one
-        base_name = os.path.join(tmpdir2, 'archive')
-        old_dir = os.getcwd()
-        os.chdir(tmpdir)
-        try:
-            _make_tarball(splitdrive(base_name)[1], '.', compress=None)
-        finally:
-            os.chdir(old_dir)
-        tarball = base_name + '.tar'
-        self.assertTrue(os.path.exists(tarball))
+        with support.change_cwd(work_dir):
+            tarball = make_archive(rel_base_name, 'tar', root_dir, '.')
+        self.assertEqual(tarball, base_name + '.tar')
+        self.assertTrue(os.path.isfile(tarball))
+        self.assertTrue(tarfile.is_tarfile(tarball))
+        with tarfile.open(tarball, 'r') as tf:
+            self.assertEqual(sorted(tf.getnames()),
+                             ['.', './file1', './file2',
+                              './sub', './sub/file3', './sub2'])
 
     def _tarinfo(self, path):
-        tar = tarfile.open(path)
-        try:
+        with tarfile.open(path) as tar:
             names = tar.getnames()
             names.sort()
             return tuple(names)
-        finally:
-            tar.close()
 
-    def _create_files(self):
+    def _create_files(self, base_dir='dist'):
         # creating something to tar
-        tmpdir = self.mkdtemp()
-        dist = os.path.join(tmpdir, 'dist')
-        os.mkdir(dist)
-        self.write_file([dist, 'file1'], 'xxx')
-        self.write_file([dist, 'file2'], 'xxx')
+        root_dir = self.mkdtemp()
+        dist = os.path.join(root_dir, base_dir)
+        if not os.path.isdir(dist):
+            os.makedirs(dist)
+        self.write_file((dist, 'file1'), 'xxx')
+        self.write_file((dist, 'file2'), 'xxx')
         os.mkdir(os.path.join(dist, 'sub'))
-        self.write_file([dist, 'sub', 'file3'], 'xxx')
+        self.write_file((dist, 'sub', 'file3'), 'xxx')
         os.mkdir(os.path.join(dist, 'sub2'))
-        tmpdir2 = self.mkdtemp()
-        base_name = os.path.join(tmpdir2, 'archive')
-        return tmpdir, tmpdir2, base_name
+        if base_dir:
+            self.write_file((root_dir, 'outer'), 'xxx')
+        return root_dir, base_dir
 
     @unittest.skipUnless(zlib, "Requires zlib")
-    @unittest.skipUnless(find_executable('tar') and find_executable('gzip'),
+    @unittest.skipUnless(find_executable('tar'),
                          'Need the tar command to run')
     def test_tarfile_vs_tar(self):
-        tmpdir, tmpdir2, base_name =  self._create_files()
-        old_dir = os.getcwd()
-        os.chdir(tmpdir)
-        try:
-            _make_tarball(base_name, 'dist')
-        finally:
-            os.chdir(old_dir)
+        root_dir, base_dir = self._create_files()
+        base_name = os.path.join(self.mkdtemp(), 'archive')
+        tarball = make_archive(base_name, 'gztar', root_dir, base_dir)
 
         # check if the compressed tarball was created
-        tarball = base_name + '.tar.gz'
-        self.assertTrue(os.path.exists(tarball))
+        self.assertEqual(tarball, base_name + '.tar.gz')
+        self.assertTrue(os.path.isfile(tarball))
 
         # now create another tarball using `tar`
-        tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
-        tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
-        gzip_cmd = ['gzip', '-f9', 'archive2.tar']
-        old_dir = os.getcwd()
-        os.chdir(tmpdir)
-        try:
-            with captured_stdout() as s:
-                spawn(tar_cmd)
-                spawn(gzip_cmd)
-        finally:
-            os.chdir(old_dir)
+        tarball2 = os.path.join(root_dir, 'archive2.tar')
+        tar_cmd = ['tar', '-cf', 'archive2.tar', base_dir]
+        subprocess.check_call(tar_cmd, cwd=root_dir)
 
-        self.assertTrue(os.path.exists(tarball2))
+        self.assertTrue(os.path.isfile(tarball2))
         # let's compare both tarballs
         self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2))
 
         # trying an uncompressed one
-        base_name = os.path.join(tmpdir2, 'archive')
-        old_dir = os.getcwd()
-        os.chdir(tmpdir)
-        try:
-            _make_tarball(base_name, 'dist', compress=None)
-        finally:
-            os.chdir(old_dir)
-        tarball = base_name + '.tar'
-        self.assertTrue(os.path.exists(tarball))
+        tarball = make_archive(base_name, 'tar', root_dir, base_dir)
+        self.assertEqual(tarball, base_name + '.tar')
+        self.assertTrue(os.path.isfile(tarball))
 
         # now for a dry_run
-        base_name = os.path.join(tmpdir2, 'archive')
-        old_dir = os.getcwd()
-        os.chdir(tmpdir)
-        try:
-            _make_tarball(base_name, 'dist', compress=None, dry_run=True)
-        finally:
-            os.chdir(old_dir)
-        tarball = base_name + '.tar'
-        self.assertTrue(os.path.exists(tarball))
+        tarball = make_archive(base_name, 'tar', root_dir, base_dir,
+                               dry_run=True)
+        self.assertEqual(tarball, base_name + '.tar')
+        self.assertTrue(os.path.isfile(tarball))
 
     @unittest.skipUnless(zlib, "Requires zlib")
     @unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
     def test_make_zipfile(self):
-        # creating something to tar
-        tmpdir = self.mkdtemp()
-        self.write_file([tmpdir, 'file1'], 'xxx')
-        self.write_file([tmpdir, 'file2'], 'xxx')
+        # creating something to zip
+        root_dir, base_dir = self._create_files()
 
         tmpdir2 = self.mkdtemp()
         # force shutil to create the directory
         os.rmdir(tmpdir2)
-        base_name = os.path.join(tmpdir2, 'archive')
-        _make_zipfile(base_name, tmpdir)
+        # working with relative paths
+        work_dir = os.path.dirname(tmpdir2)
+        rel_base_name = os.path.join(os.path.basename(tmpdir2), 'archive')
 
-        # check if the compressed tarball was created
-        tarball = base_name + '.zip'
-        self.assertTrue(os.path.exists(tarball))
+        with support.change_cwd(work_dir):
+            base_name = os.path.abspath(rel_base_name)
+            res = make_archive(rel_base_name, 'zip', root_dir, base_dir)
 
+        self.assertEqual(res, base_name + '.zip')
+        self.assertTrue(os.path.isfile(res))
+        self.assertTrue(zipfile.is_zipfile(res))
+        with zipfile.ZipFile(res) as zf:
+            self.assertEqual(sorted(zf.namelist()),
+                    ['dist/', 'dist/file1', 'dist/file2',
+                     'dist/sub/', 'dist/sub/file3', 'dist/sub2/'])
+
+    @unittest.skipUnless(zlib, "Requires zlib")
+    @unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
+    @unittest.skipUnless(find_executable('zip'),
+                         'Need the zip command to run')
+    def test_zipfile_vs_zip(self):
+        root_dir, base_dir = self._create_files()
+        base_name = os.path.join(self.mkdtemp(), 'archive')
+        archive = make_archive(base_name, 'zip', root_dir, base_dir)
+
+        # check if the ZIP file was created
+        self.assertEqual(archive, base_name + '.zip')
+        self.assertTrue(os.path.isfile(archive))
+
+        # now create another ZIP file using `zip`
+        archive2 = os.path.join(root_dir, 'archive2.zip')
+        zip_cmd = ['zip', '-q', '-r', 'archive2.zip', base_dir]
+        subprocess.check_call(zip_cmd, cwd=root_dir)
+
+        self.assertTrue(os.path.isfile(archive2))
+        # let's compare both ZIP files
+        with zipfile.ZipFile(archive) as zf:
+            names = zf.namelist()
+        with zipfile.ZipFile(archive2) as zf:
+            names2 = zf.namelist()
+        self.assertEqual(sorted(names), sorted(names2))
+
+    @unittest.skipUnless(zlib, "Requires zlib")
+    @unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
+    @unittest.skipUnless(find_executable('unzip'),
+                         'Need the unzip command to run')
+    def test_unzip_zipfile(self):
+        root_dir, base_dir = self._create_files()
+        base_name = os.path.join(self.mkdtemp(), 'archive')
+        archive = make_archive(base_name, 'zip', root_dir, base_dir)
+
+        # check if the ZIP file was created
+        self.assertEqual(archive, base_name + '.zip')
+        self.assertTrue(os.path.isfile(archive))
+
+        # now check the ZIP file using `unzip -t`
+        zip_cmd = ['unzip', '-t', archive]
+        with support.change_cwd(root_dir):
+            try:
+                subprocess.check_output(zip_cmd, stderr=subprocess.STDOUT)
+            except subprocess.CalledProcessError as exc:
+                details = exc.output
+                msg = "{}\n\n**Unzip Output**\n{}"
+                self.fail(msg.format(exc, details))
 
     def test_make_archive(self):
         tmpdir = self.mkdtemp()
@@ -523,39 +549,36 @@
         else:
             group = owner = 'root'
 
-        base_dir, root_dir, base_name =  self._create_files()
-        base_name = os.path.join(self.mkdtemp() , 'archive')
+        root_dir, base_dir = self._create_files()
+        base_name = os.path.join(self.mkdtemp(), 'archive')
         res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner,
                            group=group)
-        self.assertTrue(os.path.exists(res))
+        self.assertTrue(os.path.isfile(res))
 
         res = make_archive(base_name, 'zip', root_dir, base_dir)
-        self.assertTrue(os.path.exists(res))
+        self.assertTrue(os.path.isfile(res))
 
         res = make_archive(base_name, 'tar', root_dir, base_dir,
                            owner=owner, group=group)
-        self.assertTrue(os.path.exists(res))
+        self.assertTrue(os.path.isfile(res))
 
         res = make_archive(base_name, 'tar', root_dir, base_dir,
                            owner='kjhkjhkjg', group='oihohoh')
-        self.assertTrue(os.path.exists(res))
+        self.assertTrue(os.path.isfile(res))
 
     @unittest.skipUnless(zlib, "Requires zlib")
     @unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
     def test_tarfile_root_owner(self):
-        tmpdir, tmpdir2, base_name =  self._create_files()
-        old_dir = os.getcwd()
-        os.chdir(tmpdir)
+        root_dir, base_dir = self._create_files()
+        base_name = os.path.join(self.mkdtemp(), 'archive')
         group = grp.getgrgid(0)[0]
         owner = pwd.getpwuid(0)[0]
-        try:
-            archive_name = _make_tarball(base_name, 'dist', compress=None,
-                                         owner=owner, group=group)
-        finally:
-            os.chdir(old_dir)
+        with support.change_cwd(root_dir):
+            archive_name = make_archive(base_name, 'gztar', root_dir, 'dist',
+                                        owner=owner, group=group)
 
         # check if the compressed tarball was created
-        self.assertTrue(os.path.exists(archive_name))
+        self.assertTrue(os.path.isfile(archive_name))
 
         # now checks the rights
         archive = tarfile.open(archive_name)
@@ -889,7 +912,7 @@
 
 
 def test_main():
-    test_support.run_unittest(TestShutil, TestMove, TestCopyFile)
+    support.run_unittest(TestShutil, TestMove, TestCopyFile)
 
 if __name__ == '__main__':
     test_main()
diff --git a/lib/python2.7/test/test_site.py b/lib/python2.7/test/test_site.py
index 0898449..78c4809 100644
--- a/lib/python2.7/test/test_site.py
+++ b/lib/python2.7/test/test_site.py
@@ -26,8 +26,13 @@
 
 if site.ENABLE_USER_SITE and not os.path.isdir(site.USER_SITE):
     # need to add user site directory for tests
-    os.makedirs(site.USER_SITE)
-    site.addsitedir(site.USER_SITE)
+    try:
+        os.makedirs(site.USER_SITE)
+        site.addsitedir(site.USER_SITE)
+    except OSError as exc:
+        raise unittest.SkipTest('unable to create user site directory (%r): %s'
+                                % (site.USER_SITE, exc))
+
 
 class HelperFunctionsTests(unittest.TestCase):
     """Tests for helper functions.
diff --git a/lib/python2.7/test/test_slice.py b/lib/python2.7/test/test_slice.py
index 3304d6b..68518d7 100644
--- a/lib/python2.7/test/test_slice.py
+++ b/lib/python2.7/test/test_slice.py
@@ -18,7 +18,8 @@
     def test_hash(self):
         # Verify clearing of SF bug #800796
         self.assertRaises(TypeError, hash, slice(5))
-        self.assertRaises(TypeError, slice(5).__hash__)
+        with self.assertRaises(TypeError):
+            slice(5).__hash__()
 
     def test_cmp(self):
         s1 = slice(1, 2, 3)
diff --git a/lib/python2.7/test/test_socketserver.py b/lib/python2.7/test/test_socketserver.py
index 8707017..d645d20 100644
--- a/lib/python2.7/test/test_socketserver.py
+++ b/lib/python2.7/test/test_socketserver.py
@@ -158,6 +158,8 @@
         if verbose: print "waiting for server"
         server.shutdown()
         t.join()
+        server.server_close()
+        self.assertRaises(socket.error, server.socket.fileno)
         if verbose: print "done"
 
     def stream_examine(self, proto, addr):
@@ -173,6 +175,8 @@
 
     def dgram_examine(self, proto, addr):
         s = socket.socket(proto, socket.SOCK_DGRAM)
+        if HAVE_UNIX_SOCKETS and proto == socket.AF_UNIX:
+            s.bind(self.pickaddr(proto))
         s.sendto(TEST_STR, addr)
         buf = data = receive(s, 100)
         while data and '\n' not in buf:
@@ -267,27 +271,24 @@
             # Make sure select was called again:
             self.assertGreater(mock_select.called, 1)
 
-    # Alas, on Linux (at least) recvfrom() doesn't return a meaningful
-    # client address so this cannot work:
+    @requires_unix_sockets
+    def test_UnixDatagramServer(self):
+        self.run_server(SocketServer.UnixDatagramServer,
+                        SocketServer.DatagramRequestHandler,
+                        self.dgram_examine)
 
-    # @requires_unix_sockets
-    # def test_UnixDatagramServer(self):
-    #     self.run_server(SocketServer.UnixDatagramServer,
-    #                     SocketServer.DatagramRequestHandler,
-    #                     self.dgram_examine)
-    #
-    # @requires_unix_sockets
-    # def test_ThreadingUnixDatagramServer(self):
-    #     self.run_server(SocketServer.ThreadingUnixDatagramServer,
-    #                     SocketServer.DatagramRequestHandler,
-    #                     self.dgram_examine)
-    #
-    # @requires_unix_sockets
-    # @requires_forking
-    # def test_ForkingUnixDatagramServer(self):
-    #     self.run_server(SocketServer.ForkingUnixDatagramServer,
-    #                     SocketServer.DatagramRequestHandler,
-    #                     self.dgram_examine)
+    @requires_unix_sockets
+    def test_ThreadingUnixDatagramServer(self):
+        self.run_server(SocketServer.ThreadingUnixDatagramServer,
+                        SocketServer.DatagramRequestHandler,
+                        self.dgram_examine)
+
+    @requires_unix_sockets
+    @requires_forking
+    def test_ForkingUnixDatagramServer(self):
+        self.run_server(ForkingUnixDatagramServer,
+                        SocketServer.DatagramRequestHandler,
+                        self.dgram_examine)
 
     @reap_threads
     def test_shutdown(self):
@@ -325,6 +326,30 @@
                                        SocketServer.StreamRequestHandler)
 
 
+class MiscTestCase(unittest.TestCase):
+
+    def test_shutdown_request_called_if_verify_request_false(self):
+        # Issue #26309: BaseServer should call shutdown_request even if
+        # verify_request is False
+
+        class MyServer(SocketServer.TCPServer):
+            def verify_request(self, request, client_address):
+                return False
+
+            shutdown_called = 0
+            def shutdown_request(self, request):
+                self.shutdown_called += 1
+                SocketServer.TCPServer.shutdown_request(self, request)
+
+        server = MyServer((HOST, 0), SocketServer.StreamRequestHandler)
+        s = socket.socket(server.address_family, socket.SOCK_STREAM)
+        s.connect(server.server_address)
+        s.close()
+        server.handle_request()
+        self.assertEqual(server.shutdown_called, 1)
+        server.server_close()
+
+
 def test_main():
     if imp.lock_held():
         # If the import lock is held, the threads will hang
diff --git a/lib/python2.7/test/test_ssl.py b/lib/python2.7/test/test_ssl.py
index e58f55a..e9723a7 100644
--- a/lib/python2.7/test/test_ssl.py
+++ b/lib/python2.7/test/test_ssl.py
@@ -57,11 +57,12 @@
 SIGNED_CERTFILE2 = data_file("keycert4.pem")
 SIGNING_CA = data_file("pycacert.pem")
 
-SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem")
+REMOTE_HOST = "self-signed.pythontest.net"
+REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem")
 
 EMPTYCERT = data_file("nullcert.pem")
 BADCERT = data_file("badcert.pem")
-WRONGCERT = data_file("XXXnonexisting.pem")
+NONEXISTINGCERT = data_file("XXXnonexisting.pem")
 BADKEY = data_file("badkey.pem")
 NOKIACERT = data_file("nokia.pem")
 NULLBYTECERT = data_file("nullbytecert.pem")
@@ -244,7 +245,7 @@
         self.assertEqual(p['subjectAltName'], san)
 
     def test_DER_to_PEM(self):
-        with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
+        with open(CAFILE_CACERT, 'r') as f:
             pem = f.read()
         d1 = ssl.PEM_cert_to_DER_cert(pem)
         p2 = ssl.DER_cert_to_PEM_cert(d1)
@@ -333,17 +334,42 @@
                                     s.connect, (HOST, 8080))
         with self.assertRaises(IOError) as cm:
             with closing(socket.socket()) as sock:
-                ssl.wrap_socket(sock, certfile=WRONGCERT)
+                ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
         self.assertEqual(cm.exception.errno, errno.ENOENT)
         with self.assertRaises(IOError) as cm:
             with closing(socket.socket()) as sock:
-                ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT)
+                ssl.wrap_socket(sock,
+                    certfile=CERTFILE, keyfile=NONEXISTINGCERT)
         self.assertEqual(cm.exception.errno, errno.ENOENT)
         with self.assertRaises(IOError) as cm:
             with closing(socket.socket()) as sock:
-                ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT)
+                ssl.wrap_socket(sock,
+                    certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
         self.assertEqual(cm.exception.errno, errno.ENOENT)
 
+    def bad_cert_test(self, certfile):
+        """Check that trying to use the given client certificate fails"""
+        certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
+                                   certfile)
+        sock = socket.socket()
+        self.addCleanup(sock.close)
+        with self.assertRaises(ssl.SSLError):
+            ssl.wrap_socket(sock,
+                            certfile=certfile,
+                            ssl_version=ssl.PROTOCOL_TLSv1)
+
+    def test_empty_cert(self):
+        """Wrapping with an empty cert file"""
+        self.bad_cert_test("nullcert.pem")
+
+    def test_malformed_cert(self):
+        """Wrapping with a badly formatted certificate (syntax error)"""
+        self.bad_cert_test("badcert.pem")
+
+    def test_malformed_key(self):
+        """Wrapping with a badly formatted key (syntax error)"""
+        self.bad_cert_test("badkey.pem")
+
     def test_match_hostname(self):
         def ok(cert, hostname):
             ssl.match_hostname(cert, hostname)
@@ -714,12 +740,12 @@
     @skip_if_broken_ubuntu_ssl
     def test_options(self):
         ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
-        # OP_ALL | OP_NO_SSLv2 is the default value
-        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2,
-                         ctx.options)
-        ctx.options |= ssl.OP_NO_SSLv3
+        # OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
         self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
                          ctx.options)
+        ctx.options |= ssl.OP_NO_TLSv1
+        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1,
+                         ctx.options)
         if can_clear_options():
             ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
             self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
@@ -772,7 +798,7 @@
         ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
         self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
         with self.assertRaises(IOError) as cm:
-            ctx.load_cert_chain(WRONGCERT)
+            ctx.load_cert_chain(NONEXISTINGCERT)
         self.assertEqual(cm.exception.errno, errno.ENOENT)
         with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
             ctx.load_cert_chain(BADCERT)
@@ -792,7 +818,7 @@
         # Mismatching key and cert
         ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
         with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"):
-            ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY)
+            ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
         # Password protected key and cert
         ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
         ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
@@ -858,7 +884,7 @@
         self.assertRaises(TypeError, ctx.load_verify_locations)
         self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
         with self.assertRaises(IOError) as cm:
-            ctx.load_verify_locations(WRONGCERT)
+            ctx.load_verify_locations(NONEXISTINGCERT)
         self.assertEqual(cm.exception.errno, errno.ENOENT)
         with self.assertRaises(IOError):
             ctx.load_verify_locations(u'')
@@ -936,7 +962,7 @@
         self.assertRaises(TypeError, ctx.load_dh_params)
         self.assertRaises(TypeError, ctx.load_dh_params, None)
         with self.assertRaises(IOError) as cm:
-            ctx.load_dh_params(WRONGCERT)
+            ctx.load_dh_params(NONEXISTINGCERT)
         self.assertEqual(cm.exception.errno, errno.ENOENT)
         with self.assertRaises(ssl.SSLError) as cm:
             ctx.load_dh_params(CERTFILE)
@@ -1013,7 +1039,7 @@
         ctx.load_verify_locations(CERTFILE)
         self.assertEqual(ctx.cert_store_stats(),
             {'x509_ca': 0, 'crl': 0, 'x509': 1})
-        ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
+        ctx.load_verify_locations(CAFILE_CACERT)
         self.assertEqual(ctx.cert_store_stats(),
             {'x509_ca': 1, 'crl': 0, 'x509': 2})
 
@@ -1023,8 +1049,8 @@
         # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
         ctx.load_verify_locations(CERTFILE)
         self.assertEqual(ctx.get_ca_certs(), [])
-        # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert
-        ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
+        # but CAFILE_CACERT is a CA cert
+        ctx.load_verify_locations(CAFILE_CACERT)
         self.assertEqual(ctx.get_ca_certs(),
             [{'issuer': ((('organizationName', 'Root CA'),),
                          (('organizationalUnitName', 'http://www.cacert.org'),),
@@ -1040,7 +1066,7 @@
                           (('emailAddress', 'support@cacert.org'),)),
               'version': 3}])
 
-        with open(SVN_PYTHON_ORG_ROOT_CERT) as f:
+        with open(CAFILE_CACERT) as f:
             pem = f.read()
         der = ssl.PEM_cert_to_DER_cert(pem)
         self.assertEqual(ctx.get_ca_certs(True), [der])
@@ -1215,11 +1241,11 @@
 class NetworkedTests(unittest.TestCase):
 
     def test_connect(self):
-        with support.transient_internet("svn.python.org"):
+        with support.transient_internet(REMOTE_HOST):
             s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_NONE)
             try:
-                s.connect(("svn.python.org", 443))
+                s.connect((REMOTE_HOST, 443))
                 self.assertEqual({}, s.getpeercert())
             finally:
                 s.close()
@@ -1228,27 +1254,27 @@
             s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_REQUIRED)
             self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
-                                   s.connect, ("svn.python.org", 443))
+                                   s.connect, (REMOTE_HOST, 443))
             s.close()
 
             # this should succeed because we specify the root cert
             s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_REQUIRED,
-                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
+                                ca_certs=REMOTE_ROOT_CERT)
             try:
-                s.connect(("svn.python.org", 443))
+                s.connect((REMOTE_HOST, 443))
                 self.assertTrue(s.getpeercert())
             finally:
                 s.close()
 
     def test_connect_ex(self):
         # Issue #11326: check connect_ex() implementation
-        with support.transient_internet("svn.python.org"):
+        with support.transient_internet(REMOTE_HOST):
             s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_REQUIRED,
-                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
+                                ca_certs=REMOTE_ROOT_CERT)
             try:
-                self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
+                self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443)))
                 self.assertTrue(s.getpeercert())
             finally:
                 s.close()
@@ -1256,14 +1282,14 @@
     def test_non_blocking_connect_ex(self):
         # Issue #11326: non-blocking connect_ex() should allow handshake
         # to proceed after the socket gets ready.
-        with support.transient_internet("svn.python.org"):
+        with support.transient_internet(REMOTE_HOST):
             s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_REQUIRED,
-                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
+                                ca_certs=REMOTE_ROOT_CERT,
                                 do_handshake_on_connect=False)
             try:
                 s.setblocking(False)
-                rc = s.connect_ex(('svn.python.org', 443))
+                rc = s.connect_ex((REMOTE_HOST, 443))
                 # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
                 self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
                 # Wait for connect to finish
@@ -1285,58 +1311,62 @@
     def test_timeout_connect_ex(self):
         # Issue #12065: on a timeout, connect_ex() should return the original
         # errno (mimicking the behaviour of non-SSL sockets).
-        with support.transient_internet("svn.python.org"):
+        with support.transient_internet(REMOTE_HOST):
             s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_REQUIRED,
-                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
+                                ca_certs=REMOTE_ROOT_CERT,
                                 do_handshake_on_connect=False)
             try:
                 s.settimeout(0.0000001)
-                rc = s.connect_ex(('svn.python.org', 443))
+                rc = s.connect_ex((REMOTE_HOST, 443))
                 if rc == 0:
-                    self.skipTest("svn.python.org responded too quickly")
+                    self.skipTest("REMOTE_HOST responded too quickly")
                 self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
             finally:
                 s.close()
 
     def test_connect_ex_error(self):
-        with support.transient_internet("svn.python.org"):
+        with support.transient_internet(REMOTE_HOST):
             s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_REQUIRED,
-                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
+                                ca_certs=REMOTE_ROOT_CERT)
             try:
-                rc = s.connect_ex(("svn.python.org", 444))
+                rc = s.connect_ex((REMOTE_HOST, 444))
                 # Issue #19919: Windows machines or VMs hosted on Windows
                 # machines sometimes return EWOULDBLOCK.
-                self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK))
+                errors = (
+                    errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
+                    errno.EWOULDBLOCK,
+                )
+                self.assertIn(rc, errors)
             finally:
                 s.close()
 
     def test_connect_with_context(self):
-        with support.transient_internet("svn.python.org"):
+        with support.transient_internet(REMOTE_HOST):
             # Same as test_connect, but with a separately created context
             ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
             s = ctx.wrap_socket(socket.socket(socket.AF_INET))
-            s.connect(("svn.python.org", 443))
+            s.connect((REMOTE_HOST, 443))
             try:
                 self.assertEqual({}, s.getpeercert())
             finally:
                 s.close()
             # Same with a server hostname
             s = ctx.wrap_socket(socket.socket(socket.AF_INET),
-                                server_hostname="svn.python.org")
-            s.connect(("svn.python.org", 443))
+                                server_hostname=REMOTE_HOST)
+            s.connect((REMOTE_HOST, 443))
             s.close()
             # This should fail because we have no verification certs
             ctx.verify_mode = ssl.CERT_REQUIRED
             s = ctx.wrap_socket(socket.socket(socket.AF_INET))
             self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
-                                    s.connect, ("svn.python.org", 443))
+                                    s.connect, (REMOTE_HOST, 443))
             s.close()
             # This should succeed because we specify the root cert
-            ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
+            ctx.load_verify_locations(REMOTE_ROOT_CERT)
             s = ctx.wrap_socket(socket.socket(socket.AF_INET))
-            s.connect(("svn.python.org", 443))
+            s.connect((REMOTE_HOST, 443))
             try:
                 cert = s.getpeercert()
                 self.assertTrue(cert)
@@ -1349,12 +1379,12 @@
         # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
         # contain both versions of each certificate (same content, different
         # filename) for this test to be portable across OpenSSL releases.
-        with support.transient_internet("svn.python.org"):
+        with support.transient_internet(REMOTE_HOST):
             ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
             ctx.verify_mode = ssl.CERT_REQUIRED
             ctx.load_verify_locations(capath=CAPATH)
             s = ctx.wrap_socket(socket.socket(socket.AF_INET))
-            s.connect(("svn.python.org", 443))
+            s.connect((REMOTE_HOST, 443))
             try:
                 cert = s.getpeercert()
                 self.assertTrue(cert)
@@ -1365,7 +1395,7 @@
             ctx.verify_mode = ssl.CERT_REQUIRED
             ctx.load_verify_locations(capath=BYTES_CAPATH)
             s = ctx.wrap_socket(socket.socket(socket.AF_INET))
-            s.connect(("svn.python.org", 443))
+            s.connect((REMOTE_HOST, 443))
             try:
                 cert = s.getpeercert()
                 self.assertTrue(cert)
@@ -1373,15 +1403,15 @@
                 s.close()
 
     def test_connect_cadata(self):
-        with open(CAFILE_CACERT) as f:
+        with open(REMOTE_ROOT_CERT) as f:
             pem = f.read().decode('ascii')
         der = ssl.PEM_cert_to_DER_cert(pem)
-        with support.transient_internet("svn.python.org"):
+        with support.transient_internet(REMOTE_HOST):
             ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
             ctx.verify_mode = ssl.CERT_REQUIRED
             ctx.load_verify_locations(cadata=pem)
             with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
-                s.connect(("svn.python.org", 443))
+                s.connect((REMOTE_HOST, 443))
                 cert = s.getpeercert()
                 self.assertTrue(cert)
 
@@ -1390,7 +1420,7 @@
             ctx.verify_mode = ssl.CERT_REQUIRED
             ctx.load_verify_locations(cadata=der)
             with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
-                s.connect(("svn.python.org", 443))
+                s.connect((REMOTE_HOST, 443))
                 cert = s.getpeercert()
                 self.assertTrue(cert)
 
@@ -1399,9 +1429,9 @@
         # Issue #5238: creating a file-like object with makefile() shouldn't
         # delay closing the underlying "real socket" (here tested with its
         # file descriptor, hence skipping the test under Windows).
-        with support.transient_internet("svn.python.org"):
+        with support.transient_internet(REMOTE_HOST):
             ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
-            ss.connect(("svn.python.org", 443))
+            ss.connect((REMOTE_HOST, 443))
             fd = ss.fileno()
             f = ss.makefile()
             f.close()
@@ -1415,9 +1445,9 @@
             self.assertEqual(e.exception.errno, errno.EBADF)
 
     def test_non_blocking_handshake(self):
-        with support.transient_internet("svn.python.org"):
+        with support.transient_internet(REMOTE_HOST):
             s = socket.socket(socket.AF_INET)
-            s.connect(("svn.python.org", 443))
+            s.connect((REMOTE_HOST, 443))
             s.setblocking(False)
             s = ssl.wrap_socket(s,
                                 cert_reqs=ssl.CERT_NONE,
@@ -1460,12 +1490,12 @@
                 if support.verbose:
                     sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
 
-        _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT)
+        _test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT)
         if support.IPV6_ENABLED:
             _test_get_server_certificate('ipv6.google.com', 443)
 
     def test_ciphers(self):
-        remote = ("svn.python.org", 443)
+        remote = (REMOTE_HOST, 443)
         with support.transient_internet(remote[0]):
             with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
                                          cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s:
@@ -1510,13 +1540,13 @@
 
     def test_get_ca_certs_capath(self):
         # capath certs are loaded on request
-        with support.transient_internet("svn.python.org"):
+        with support.transient_internet(REMOTE_HOST):
             ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
             ctx.verify_mode = ssl.CERT_REQUIRED
             ctx.load_verify_locations(capath=CAPATH)
             self.assertEqual(ctx.get_ca_certs(), [])
             s = ctx.wrap_socket(socket.socket(socket.AF_INET))
-            s.connect(("svn.python.org", 443))
+            s.connect((REMOTE_HOST, 443))
             try:
                 cert = s.getpeercert()
                 self.assertTrue(cert)
@@ -1527,12 +1557,12 @@
     @needs_sni
     def test_context_setget(self):
         # Check that the context of a connected socket can be replaced.
-        with support.transient_internet("svn.python.org"):
+        with support.transient_internet(REMOTE_HOST):
             ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
             ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
             s = socket.socket(socket.AF_INET)
             with closing(ctx1.wrap_socket(s)) as ss:
-                ss.connect(("svn.python.org", 443))
+                ss.connect((REMOTE_HOST, 443))
                 self.assertIs(ss.context, ctx1)
                 self.assertIs(ss._sslobj.context, ctx1)
                 ss.context = ctx2
@@ -1866,36 +1896,6 @@
             self.active = False
             self.server.close()
 
-    def bad_cert_test(certfile):
-        """
-        Launch a server with CERT_REQUIRED, and check that trying to
-        connect to it with the given client certificate fails.
-        """
-        server = ThreadedEchoServer(CERTFILE,
-                                    certreqs=ssl.CERT_REQUIRED,
-                                    cacerts=CERTFILE, chatty=False,
-                                    connectionchatty=False)
-        with server:
-            try:
-                with closing(socket.socket()) as sock:
-                    s = ssl.wrap_socket(sock,
-                                        certfile=certfile,
-                                        ssl_version=ssl.PROTOCOL_TLSv1)
-                    s.connect((HOST, server.port))
-            except ssl.SSLError as x:
-                if support.verbose:
-                    sys.stdout.write("\nSSLError is %s\n" % x.args[1])
-            except OSError as x:
-                if support.verbose:
-                    sys.stdout.write("\nOSError is %s\n" % x.args[1])
-            except OSError as x:
-                if x.errno != errno.ENOENT:
-                    raise
-                if support.verbose:
-                    sys.stdout.write("\OSError is %s\n" % str(x))
-            else:
-                raise AssertionError("Use of invalid cert should have failed!")
-
     def server_params_test(client_context, server_context, indata=b"FOO\n",
                            chatty=True, connectionchatty=False, sni_name=None):
         """
@@ -2134,22 +2134,38 @@
                                                 "check_hostname requires server_hostname"):
                         context.wrap_socket(s)
 
-        def test_empty_cert(self):
-            """Connecting with an empty cert file"""
-            bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
-                                      "nullcert.pem"))
-        def test_malformed_cert(self):
-            """Connecting with a badly formatted certificate (syntax error)"""
-            bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
-                                       "badcert.pem"))
-        def test_nonexisting_cert(self):
-            """Connecting with a non-existing cert file"""
-            bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
-                                       "wrongcert.pem"))
-        def test_malformed_key(self):
-            """Connecting with a badly formatted key (syntax error)"""
-            bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
-                                       "badkey.pem"))
+        def test_wrong_cert(self):
+            """Connecting when the server rejects the client's certificate
+
+            Launch a server with CERT_REQUIRED, and check that trying to
+            connect to it with a wrong client certificate fails.
+            """
+            certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
+                                       "wrongcert.pem")
+            server = ThreadedEchoServer(CERTFILE,
+                                        certreqs=ssl.CERT_REQUIRED,
+                                        cacerts=CERTFILE, chatty=False,
+                                        connectionchatty=False)
+            with server, \
+                    closing(socket.socket()) as sock, \
+                    closing(ssl.wrap_socket(sock,
+                                        certfile=certfile,
+                                        ssl_version=ssl.PROTOCOL_TLSv1)) as s:
+                try:
+                    # Expect either an SSL error about the server rejecting
+                    # the connection, or a low-level connection reset (which
+                    # sometimes happens on Windows)
+                    s.connect((HOST, server.port))
+                except ssl.SSLError as e:
+                    if support.verbose:
+                        sys.stdout.write("\nSSLError is %r\n" % e)
+                except socket.error as e:
+                    if e.errno != errno.ECONNRESET:
+                        raise
+                    if support.verbose:
+                        sys.stdout.write("\nsocket.error is %r\n" % e)
+                else:
+                    self.fail("Use of invalid cert should have failed!")
 
         def test_rude_shutdown(self):
             """A brutal shutdown of an SSL server should raise an OSError
@@ -2230,17 +2246,17 @@
                             " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
                             % str(x))
             if hasattr(ssl, 'PROTOCOL_SSLv3'):
-                try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3')
+                try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
             try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
             try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
 
             if hasattr(ssl, 'PROTOCOL_SSLv3'):
-                try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
+                try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
             try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
             try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
 
             if hasattr(ssl, 'PROTOCOL_SSLv3'):
-                try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
+                try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
             try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
             try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
 
@@ -2272,8 +2288,8 @@
             try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
             if no_sslv2_implies_sslv3_hello():
                 # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
-                try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, 'SSLv3',
-                                   client_options=ssl.OP_NO_SSLv2)
+                try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
+                                   False, client_options=ssl.OP_NO_SSLv2)
 
         @skip_if_broken_ubuntu_ssl
         def test_protocol_tlsv1(self):
@@ -3026,7 +3042,7 @@
             pass
 
     for filename in [
-        CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE,
+        CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE,
         ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
         SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
         BADCERT, BADKEY, EMPTYCERT]:
diff --git a/lib/python2.7/test/test_str.py b/lib/python2.7/test/test_str.py
index 2cd7966..5bb9f48 100644
--- a/lib/python2.7/test/test_str.py
+++ b/lib/python2.7/test/test_str.py
@@ -4,6 +4,9 @@
 from test import test_support, string_tests
 
 
+class StrSubclass(str):
+    pass
+
 class StrTest(
     string_tests.CommonTest,
     string_tests.MixinStrUnicodeUserStringTest,
@@ -107,6 +110,9 @@
         self.assertEqual(str(Foo6("bar")), "foos")
         self.assertEqual(str(Foo7("bar")), "foos")
         self.assertEqual(str(Foo8("foo")), "foofoo")
+        self.assertIs(type(str(Foo8("foo"))), Foo8)
+        self.assertEqual(StrSubclass(Foo8("foo")), "foofoo")
+        self.assertIs(type(StrSubclass(Foo8("foo"))), StrSubclass)
         self.assertEqual(str(Foo9("foo")), "string")
         self.assertEqual(unicode(Foo9("foo")), u"not unicode")
 
@@ -428,6 +434,11 @@
         self.assertEqual('{:{f}}{g}{}'.format(1, 3, g='g', f=2), ' 1g3')
         self.assertEqual('{f:{}}{}{g}'.format(2, 4, f=1, g='g'), ' 14g')
 
+    def test_format_c_overflow(self):
+        # issue #7267
+        self.assertRaises(OverflowError, '{0:c}'.format, -1)
+        self.assertRaises(OverflowError, '{0:c}'.format, 256)
+
     def test_buffer_is_readonly(self):
         self.assertRaises(TypeError, sys.stdin.readinto, b"")
 
diff --git a/lib/python2.7/test/test_strop.py b/lib/python2.7/test/test_strop.py
index 45c90a6..81d078e 100644
--- a/lib/python2.7/test/test_strop.py
+++ b/lib/python2.7/test/test_strop.py
@@ -141,6 +141,11 @@
         else:
             self.assertEqual(len(r), len(a) * 3)
 
+    @unittest.skipUnless(sys.maxsize == 2147483647, "only for 32-bit")
+    def test_stropreplace_overflow(self):
+        a = "A" * 0x10000
+        self.assertRaises(MemoryError, strop.replace, a, "A", a)
+
 transtable = '\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
 
 
diff --git a/lib/python2.7/test/test_strptime.py b/lib/python2.7/test/test_strptime.py
index 7a47f9e..3d24941 100644
--- a/lib/python2.7/test/test_strptime.py
+++ b/lib/python2.7/test/test_strptime.py
@@ -4,8 +4,9 @@
 import time
 import locale
 import re
+import os
 import sys
-from test import test_support
+from test import test_support as support
 from datetime import date as datetime_date
 
 import _strptime
@@ -189,7 +190,7 @@
 
     def test_whitespace_substitution(self):
         # When pattern contains whitespace, make sure it is taken into account
-        # so as to not allow to subpatterns to end up next to each other and
+        # so as to not allow subpatterns to end up next to each other and
         # "steal" characters from each other.
         pattern = self.time_re.pattern('%j %H')
         self.assertFalse(re.match(pattern, "180"))
@@ -314,9 +315,10 @@
         tz_name = time.tzname[0]
         if tz_name.upper() in ("UTC", "GMT"):
             self.skipTest('need non-UTC/GMT timezone')
-        try:
-            original_tzname = time.tzname
-            original_daylight = time.daylight
+
+        with support.swap_attr(time, 'tzname', (tz_name, tz_name)), \
+             support.swap_attr(time, 'daylight', 1), \
+             support.swap_attr(time, 'tzset', lambda: None):
             time.tzname = (tz_name, tz_name)
             time.daylight = 1
             tz_value = _strptime._strptime_time(tz_name, "%Z")[8]
@@ -324,9 +326,6 @@
                     "%s lead to a timezone value of %s instead of -1 when "
                     "time.daylight set to %s and passing in %s" %
                     (time.tzname, tz_value, time.daylight, tz_name))
-        finally:
-            time.tzname = original_tzname
-            time.daylight = original_daylight
 
     def test_date_time(self):
         # Test %c directive
@@ -487,14 +486,14 @@
     def test_week_0(self):
         def check(value, format, *expected):
             self.assertEqual(_strptime._strptime_time(value, format)[:-1], expected)
-        check('2015 0 0', '%Y %U %w', 2014, 12, 28, 0, 0, 0, 6, -3)
+        check('2015 0 0', '%Y %U %w', 2014, 12, 28, 0, 0, 0, 6, 362)
         check('2015 0 0', '%Y %W %w', 2015, 1, 4, 0, 0, 0, 6, 4)
-        check('2015 0 1', '%Y %U %w', 2014, 12, 29, 0, 0, 0, 0, -2)
-        check('2015 0 1', '%Y %W %w', 2014, 12, 29, 0, 0, 0, 0, -2)
-        check('2015 0 2', '%Y %U %w', 2014, 12, 30, 0, 0, 0, 1, -1)
-        check('2015 0 2', '%Y %W %w', 2014, 12, 30, 0, 0, 0, 1, -1)
-        check('2015 0 3', '%Y %U %w', 2014, 12, 31, 0, 0, 0, 2, 0)
-        check('2015 0 3', '%Y %W %w', 2014, 12, 31, 0, 0, 0, 2, 0)
+        check('2015 0 1', '%Y %U %w', 2014, 12, 29, 0, 0, 0, 0, 363)
+        check('2015 0 1', '%Y %W %w', 2014, 12, 29, 0, 0, 0, 0, 363)
+        check('2015 0 2', '%Y %U %w', 2014, 12, 30, 0, 0, 0, 1, 364)
+        check('2015 0 2', '%Y %W %w', 2014, 12, 30, 0, 0, 0, 1, 364)
+        check('2015 0 3', '%Y %U %w', 2014, 12, 31, 0, 0, 0, 2, 365)
+        check('2015 0 3', '%Y %W %w', 2014, 12, 31, 0, 0, 0, 2, 365)
         check('2015 0 4', '%Y %U %w', 2015, 1, 1, 0, 0, 0, 3, 1)
         check('2015 0 4', '%Y %W %w', 2015, 1, 1, 0, 0, 0, 3, 1)
         check('2015 0 5', '%Y %U %w', 2015, 1, 2, 0, 0, 0, 4, 2)
@@ -502,6 +501,20 @@
         check('2015 0 6', '%Y %U %w', 2015, 1, 3, 0, 0, 0, 5, 3)
         check('2015 0 6', '%Y %W %w', 2015, 1, 3, 0, 0, 0, 5, 3)
 
+        check('2009 0 0', '%Y %U %w', 2008, 12, 28, 0, 0, 0, 6, 363)
+        check('2009 0 0', '%Y %W %w', 2009, 1, 4, 0, 0, 0, 6, 4)
+        check('2009 0 1', '%Y %U %w', 2008, 12, 29, 0, 0, 0, 0, 364)
+        check('2009 0 1', '%Y %W %w', 2008, 12, 29, 0, 0, 0, 0, 364)
+        check('2009 0 2', '%Y %U %w', 2008, 12, 30, 0, 0, 0, 1, 365)
+        check('2009 0 2', '%Y %W %w', 2008, 12, 30, 0, 0, 0, 1, 365)
+        check('2009 0 3', '%Y %U %w', 2008, 12, 31, 0, 0, 0, 2, 366)
+        check('2009 0 3', '%Y %W %w', 2008, 12, 31, 0, 0, 0, 2, 366)
+        check('2009 0 4', '%Y %U %w', 2009, 1, 1, 0, 0, 0, 3, 1)
+        check('2009 0 4', '%Y %W %w', 2009, 1, 1, 0, 0, 0, 3, 1)
+        check('2009 0 5', '%Y %U %w', 2009, 1, 2, 0, 0, 0, 4, 2)
+        check('2009 0 5', '%Y %W %w', 2009, 1, 2, 0, 0, 0, 4, 2)
+        check('2009 0 6', '%Y %U %w', 2009, 1, 3, 0, 0, 0, 5, 3)
+        check('2009 0 6', '%Y %W %w', 2009, 1, 3, 0, 0, 0, 5, 3)
 
 class CacheTests(unittest.TestCase):
     """Test that caching works properly."""
@@ -538,7 +551,7 @@
         _strptime._strptime_time("10", "%d")
         self.assertIsNot(locale_time_id, _strptime._TimeRE_cache.locale_time)
 
-    def test_TimeRE_recreation(self):
+    def test_TimeRE_recreation_locale(self):
         # The TimeRE instance should be recreated upon changing the locale.
         locale_info = locale.getlocale(locale.LC_TIME)
         try:
@@ -567,9 +580,36 @@
         finally:
             locale.setlocale(locale.LC_TIME, locale_info)
 
+    @support.run_with_tz('STD-1DST')
+    def test_TimeRE_recreation_timezone(self):
+        # The TimeRE instance should be recreated upon changing the timezone.
+        oldtzname = time.tzname
+        tm = _strptime._strptime_time(time.tzname[0], '%Z')
+        self.assertEqual(tm.tm_isdst, 0)
+        tm = _strptime._strptime_time(time.tzname[1], '%Z')
+        self.assertEqual(tm.tm_isdst, 1)
+        # Get id of current cache object.
+        first_time_re = _strptime._TimeRE_cache
+        # Change the timezone and force a recreation of the cache.
+        os.environ['TZ'] = 'EST+05EDT,M3.2.0,M11.1.0'
+        time.tzset()
+        tm = _strptime._strptime_time(time.tzname[0], '%Z')
+        self.assertEqual(tm.tm_isdst, 0)
+        tm = _strptime._strptime_time(time.tzname[1], '%Z')
+        self.assertEqual(tm.tm_isdst, 1)
+        # Get the new cache object's id.
+        second_time_re = _strptime._TimeRE_cache
+        # They should not be equal.
+        self.assertIsNot(first_time_re, second_time_re)
+        # Make sure old names no longer accepted.
+        with self.assertRaises(ValueError):
+            _strptime._strptime_time(oldtzname[0], '%Z')
+        with self.assertRaises(ValueError):
+            _strptime._strptime_time(oldtzname[1], '%Z')
+
 
 def test_main():
-    test_support.run_unittest(
+    support.run_unittest(
         getlang_Tests,
         LocaleTime_Tests,
         TimeRETests,
diff --git a/lib/python2.7/test/test_subprocess.py b/lib/python2.7/test/test_subprocess.py
index 0efcdbf..06de108 100644
--- a/lib/python2.7/test/test_subprocess.py
+++ b/lib/python2.7/test/test_subprocess.py
@@ -32,16 +32,6 @@
     SETBINARY = ''
 
 
-try:
-    mkstemp = tempfile.mkstemp
-except AttributeError:
-    # tempfile.mkstemp is not available
-    def mkstemp():
-        """Replacement for mkstemp, calling mktemp."""
-        fname = tempfile.mktemp()
-        return os.open(fname, os.O_RDWR|os.O_CREAT), fname
-
-
 class BaseTestCase(unittest.TestCase):
     def setUp(self):
         # Try to minimize the number of children we have so this test
@@ -666,9 +656,9 @@
     def test_handles_closed_on_exception(self):
         # If CreateProcess exits with an error, ensure the
         # duplicate output handles are released
-        ifhandle, ifname = mkstemp()
-        ofhandle, ofname = mkstemp()
-        efhandle, efname = mkstemp()
+        ifhandle, ifname = tempfile.mkstemp()
+        ofhandle, ofname = tempfile.mkstemp()
+        efhandle, efname = tempfile.mkstemp()
         try:
             subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
               stderr=efhandle)
@@ -858,7 +848,7 @@
 
     def test_args_string(self):
         # args is a string
-        f, fname = mkstemp()
+        f, fname = tempfile.mkstemp()
         os.write(f, "#!/bin/sh\n")
         os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" %
                     sys.executable)
@@ -902,7 +892,7 @@
 
     def test_call_string(self):
         # call() function with string argument on UNIX
-        f, fname = mkstemp()
+        f, fname = tempfile.mkstemp()
         os.write(f, "#!/bin/sh\n")
         os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" %
                     sys.executable)
@@ -1058,7 +1048,7 @@
 
     def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
         # open up some temporary files
-        temps = [mkstemp() for i in range(3)]
+        temps = [tempfile.mkstemp() for i in range(3)]
         temp_fds = [fd for fd, fname in temps]
         try:
             # unlink the files -- we won't need to reopen them
@@ -1379,7 +1369,7 @@
 
     def setUp(self):
         super(CommandsWithSpaces, self).setUp()
-        f, fname = mkstemp(".py", "te st")
+        f, fname = tempfile.mkstemp(".py", "te st")
         self.fname = fname.lower ()
         os.write(f, b"import sys;"
                     b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
diff --git a/lib/python2.7/test/test_support.py b/lib/python2.7/test/test_support.py
index 75563cb..956936f 100644
--- a/lib/python2.7/test/test_support.py
+++ b/lib/python2.7/test/test_support.py
@@ -40,7 +40,7 @@
            "threading_cleanup", "reap_threads", "start_threads", "cpython_only",
            "check_impl_detail", "get_attribute", "py3k_bytes",
            "import_fresh_module", "threading_cleanup", "reap_children",
-           "strip_python_stderr", "IPV6_ENABLED"]
+           "strip_python_stderr", "IPV6_ENABLED", "run_with_tz"]
 
 class Error(Exception):
     """Base class for regression test exceptions."""
@@ -669,6 +669,33 @@
 SAVEDCWD = os.getcwd()
 
 @contextlib.contextmanager
+def change_cwd(path, quiet=False):
+    """Return a context manager that changes the current working directory.
+
+    Arguments:
+
+      path: the directory to use as the temporary current working directory.
+
+      quiet: if False (the default), the context manager raises an exception
+        on error.  Otherwise, it issues only a warning and keeps the current
+        working directory the same.
+
+    """
+    saved_dir = os.getcwd()
+    try:
+        os.chdir(path)
+    except OSError:
+        if not quiet:
+            raise
+        warnings.warn('tests may fail, unable to change CWD to: ' + path,
+                      RuntimeWarning, stacklevel=3)
+    try:
+        yield os.getcwd()
+    finally:
+        os.chdir(saved_dir)
+
+
+@contextlib.contextmanager
 def temp_cwd(name='tempcwd', quiet=False):
     """
     Context manager that creates a temporary directory and set it as CWD.
@@ -678,7 +705,8 @@
     the CWD, an error is raised.  If it's True, only a warning is raised
     and the original CWD is used.
     """
-    if have_unicode and isinstance(name, unicode):
+    if (have_unicode and isinstance(name, unicode) and
+        not os.path.supports_unicode_filenames):
         try:
             name = name.encode(sys.getfilesystemencoding() or 'ascii')
         except UnicodeEncodeError:
@@ -1198,6 +1226,39 @@
     return decorator
 
 #=======================================================================
+# Decorator for running a function in a specific timezone, correctly
+# resetting it afterwards.
+
+def run_with_tz(tz):
+    def decorator(func):
+        def inner(*args, **kwds):
+            try:
+                tzset = time.tzset
+            except AttributeError:
+                raise unittest.SkipTest("tzset required")
+            if 'TZ' in os.environ:
+                orig_tz = os.environ['TZ']
+            else:
+                orig_tz = None
+            os.environ['TZ'] = tz
+            tzset()
+
+            # now run the function, resetting the tz on exceptions
+            try:
+                return func(*args, **kwds)
+            finally:
+                if orig_tz is None:
+                    del os.environ['TZ']
+                else:
+                    os.environ['TZ'] = orig_tz
+                time.tzset()
+
+        inner.__name__ = func.__name__
+        inner.__doc__ = func.__doc__
+        return inner
+    return decorator
+
+#=======================================================================
 # Big-memory-test support. Separate from 'resources' because memory use should be configurable.
 
 # Some handy shorthands. Note that these are used for byte-limits as well
diff --git a/lib/python2.7/test/test_sys.py b/lib/python2.7/test/test_sys.py
index ab35ba4..6336a20 100644
--- a/lib/python2.7/test/test_sys.py
+++ b/lib/python2.7/test/test_sys.py
@@ -1,9 +1,12 @@
 # -*- coding: iso-8859-1 -*-
 import unittest, test.test_support
 from test.script_helper import assert_python_ok, assert_python_failure
-import sys, os, cStringIO
-import struct
+import cStringIO
+import gc
 import operator
+import os
+import struct
+import sys
 
 class SysModuleTest(unittest.TestCase):
 
@@ -412,7 +415,10 @@
     def test_43581(self):
         # Can't use sys.stdout, as this is a cStringIO object when
         # the test runs under regrtest.
-        self.assertTrue(sys.__stdout__.encoding == sys.__stderr__.encoding)
+        if not (os.environ.get('PYTHONIOENCODING') or
+                (sys.__stdout__.isatty() and sys.__stderr__.isatty())):
+            self.skipTest('stdout/stderr encoding is not set')
+        self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
 
     def test_sys_flags(self):
         self.assertTrue(sys.flags)
@@ -478,11 +484,6 @@
         self.longdigit = sys.long_info.sizeof_digit
         import _testcapi
         self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
-        self.file = open(test.test_support.TESTFN, 'wb')
-
-    def tearDown(self):
-        self.file.close()
-        test.test_support.unlink(test.test_support.TESTFN)
 
     check_sizeof = test.test_support.check_sizeof
 
@@ -526,6 +527,7 @@
 
     def test_objecttypes(self):
         # check all types defined in Objects/
+        calcsize = struct.calcsize
         size = test.test_support.calcobjsize
         vsize = test.test_support.calcvobjsize
         check = self.check_sizeof
@@ -589,9 +591,17 @@
         # method-wrapper (descriptor object)
         check({}.__iter__, size('2P'))
         # dict
-        check({}, size('3P2P' + 8*'P2P'))
+        check({}, size('3P2P') + 8*calcsize('P2P'))
         x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
-        check(x, size('3P2P' + 8*'P2P') + 16*struct.calcsize('P2P'))
+        check(x, size('3P2P') + 8*calcsize('P2P') + 16*calcsize('P2P'))
+        # dictionary-keyview
+        check({}.viewkeys(), size('P'))
+        # dictionary-valueview
+        check({}.viewvalues(), size('P'))
+        # dictionary-itemview
+        check({}.viewitems(), size('P'))
+        # dictionary iterator
+        check(iter({}), size('P2PPP'))
         # dictionary-keyiterator
         check({}.iterkeys(), size('P2PPP'))
         # dictionary-valueiterator
@@ -607,7 +617,12 @@
         # enumerate
         check(enumerate([]), size('l3P'))
         # file
-        check(self.file, size('4P2i4P3i3P3i'))
+        f = file(test.test_support.TESTFN, 'wb')
+        try:
+            check(f, size('4P2i4P3i3P3i'))
+        finally:
+            f.close()
+            test.test_support.unlink(test.test_support.TESTFN)
         # float
         check(float(0), size('d'))
         # sys.floatinfo
@@ -704,16 +719,16 @@
                 check(set(sample), s)
                 check(frozenset(sample), s)
             else:
-                check(set(sample), s + newsize*struct.calcsize('lP'))
-                check(frozenset(sample), s + newsize*struct.calcsize('lP'))
+                check(set(sample), s + newsize*calcsize('lP'))
+                check(frozenset(sample), s + newsize*calcsize('lP'))
         # setiterator
         check(iter(set()), size('P3P'))
         # slice
         check(slice(1), size('3P'))
         # str
         vh = test.test_support._vheader
-        check('', struct.calcsize(vh + 'lic'))
-        check('abc', struct.calcsize(vh + 'lic') + 3)
+        check('', calcsize(vh + 'lic'))
+        check('abc', calcsize(vh + 'lic') + 3)
         # super
         check(super(int), size('3P'))
         # tuple
@@ -722,9 +737,12 @@
         # tupleiterator
         check(iter(()), size('lP'))
         # type
-        # (PyTypeObject + PyNumberMethods +  PyMappingMethods +
-        #  PySequenceMethods + PyBufferProcs)
-        s = vsize('P2P15Pl4PP9PP11PI') + struct.calcsize('41P 10P 3P 6P')
+        s = vsize('P2P15Pl4PP9PP11PI'   # PyTypeObject
+                  '39P'                 # PyNumberMethods
+                  '3P'                  # PyMappingMethods
+                  '10P'                 # PySequenceMethods
+                  '6P'                  # PyBufferProcs
+                  '2P')
         class newstyleclass(object):
             pass
         check(newstyleclass, s)
@@ -751,6 +769,32 @@
         check(xrange(1), size('3l'))
         check(xrange(66000), size('3l'))
 
+    def check_slots(self, obj, base, extra):
+        expected = sys.getsizeof(base) + struct.calcsize(extra)
+        if gc.is_tracked(obj) and not gc.is_tracked(base):
+            expected += self.gc_headsize
+        self.assertEqual(sys.getsizeof(obj), expected)
+
+    def test_slots(self):
+        # check all subclassable types defined in Objects/ that allow
+        # non-empty __slots__
+        check = self.check_slots
+        class BA(bytearray):
+            __slots__ = 'a', 'b', 'c'
+        check(BA(), bytearray(), '3P')
+        class D(dict):
+            __slots__ = 'a', 'b', 'c'
+        check(D(x=[]), {'x': []}, '3P')
+        class L(list):
+            __slots__ = 'a', 'b', 'c'
+        check(L(), [], '3P')
+        class S(set):
+            __slots__ = 'a', 'b', 'c'
+        check(S(), set(), '3P')
+        class FS(frozenset):
+            __slots__ = 'a', 'b', 'c'
+        check(FS(), frozenset(), '3P')
+
     def test_pythontypes(self):
         # check all types defined in Python/
         size = test.test_support.calcobjsize
@@ -761,7 +805,12 @@
         check(_ast.AST(), size(''))
         # imp.NullImporter
         import imp
-        check(imp.NullImporter(self.file.name), size(''))
+        f = open(test.test_support.TESTFN, 'wb')
+        try:
+            check(imp.NullImporter(f.name), size(''))
+        finally:
+            f.close()
+            test.test_support.unlink(test.test_support.TESTFN)
         try:
             raise TypeError
         except TypeError:
diff --git a/lib/python2.7/test/test_tarfile.py b/lib/python2.7/test/test_tarfile.py
index ff3265f..3f9e996 100644
--- a/lib/python2.7/test/test_tarfile.py
+++ b/lib/python2.7/test/test_tarfile.py
@@ -1,5 +1,3 @@
-# -*- coding: iso-8859-15 -*-
-
 import sys
 import os
 import shutil
@@ -11,6 +9,7 @@
 import tarfile
 
 from test import test_support
+from test import test_support as support
 
 # Check for our compression modules.
 try:
@@ -61,9 +60,10 @@
         self.tar.extract("ustar/regtype", TEMPDIR)
         tarinfo = self.tar.getmember("ustar/regtype")
         fobj1 = open(os.path.join(TEMPDIR, "ustar/regtype"), "rU")
+        with open(os.path.join(TEMPDIR, "ustar/regtype"), "rU") as fobj1:
+            lines1 = fobj1.readlines()
         fobj2 = self.tar.extractfile(tarinfo)
 
-        lines1 = fobj1.readlines()
         lines2 = fobj2.readlines()
         self.assertTrue(lines1 == lines2,
                 "fileobj.readlines() failed")
@@ -76,18 +76,17 @@
     def test_fileobj_iter(self):
         self.tar.extract("ustar/regtype", TEMPDIR)
         tarinfo = self.tar.getmember("ustar/regtype")
-        fobj1 = open(os.path.join(TEMPDIR, "ustar/regtype"), "rU")
+        with open(os.path.join(TEMPDIR, "ustar/regtype"), "rU") as fobj1:
+            lines1 = fobj1.readlines()
         fobj2 = self.tar.extractfile(tarinfo)
-        lines1 = fobj1.readlines()
         lines2 = [line for line in fobj2]
         self.assertTrue(lines1 == lines2,
                      "fileobj.__iter__() failed")
 
     def test_fileobj_seek(self):
         self.tar.extract("ustar/regtype", TEMPDIR)
-        fobj = open(os.path.join(TEMPDIR, "ustar/regtype"), "rb")
-        data = fobj.read()
-        fobj.close()
+        with open(os.path.join(TEMPDIR, "ustar/regtype"), "rb") as fobj:
+            data = fobj.read()
 
         tarinfo = self.tar.getmember("ustar/regtype")
         fobj = self.tar.extractfile(tarinfo)
@@ -239,19 +238,24 @@
         # This test checks if tarfile.open() is able to open an empty tar
         # archive successfully. Note that an empty tar archive is not the
         # same as an empty file!
-        tarfile.open(tmpname, self.mode.replace("r", "w")).close()
+        with tarfile.open(tmpname, self.mode.replace("r", "w")):
+            pass
         try:
             tar = tarfile.open(tmpname, self.mode)
             tar.getnames()
         except tarfile.ReadError:
             self.fail("tarfile.open() failed on empty archive")
-        self.assertListEqual(tar.getmembers(), [])
+        else:
+            self.assertListEqual(tar.getmembers(), [])
+        finally:
+            tar.close()
 
     def test_null_tarfile(self):
         # Test for issue6123: Allow opening empty archives.
         # This test guarantees that tarfile.open() does not treat an empty
         # file as an empty tar archive.
-        open(tmpname, "wb").close()
+        with open(tmpname, "wb"):
+            pass
         self.assertRaises(tarfile.ReadError, tarfile.open, tmpname, self.mode)
         self.assertRaises(tarfile.ReadError, tarfile.open, tmpname)
 
@@ -275,34 +279,61 @@
         for char in ('\0', 'a'):
             # Test if EOFHeaderError ('\0') and InvalidHeaderError ('a')
             # are ignored correctly.
-            fobj = _open(tmpname, "wb")
-            fobj.write(char * 1024)
-            fobj.write(tarfile.TarInfo("foo").tobuf())
-            fobj.close()
+            with _open(tmpname, "wb") as fobj:
+                fobj.write(char * 1024)
+                fobj.write(tarfile.TarInfo("foo").tobuf())
 
             tar = tarfile.open(tmpname, mode="r", ignore_zeros=True)
-            self.assertListEqual(tar.getnames(), ["foo"],
+            try:
+                self.assertListEqual(tar.getnames(), ["foo"],
                     "ignore_zeros=True should have skipped the %r-blocks" % char)
-            tar.close()
+            finally:
+                tar.close()
+
+    def test_premature_end_of_archive(self):
+        for size in (512, 600, 1024, 1200):
+            with tarfile.open(tmpname, "w:") as tar:
+                t = tarfile.TarInfo("foo")
+                t.size = 1024
+                tar.addfile(t, StringIO.StringIO("a" * 1024))
+
+            with open(tmpname, "r+b") as fobj:
+                fobj.truncate(size)
+
+            with tarfile.open(tmpname) as tar:
+                with self.assertRaisesRegexp(tarfile.ReadError, "unexpected end of data"):
+                    for t in tar:
+                        pass
+
+            with tarfile.open(tmpname) as tar:
+                t = tar.next()
+
+                with self.assertRaisesRegexp(tarfile.ReadError, "unexpected end of data"):
+                    tar.extract(t, TEMPDIR)
+
+                with self.assertRaisesRegexp(tarfile.ReadError, "unexpected end of data"):
+                    tar.extractfile(t).read()
 
 
 class MiscReadTest(CommonReadTest):
     taropen = tarfile.TarFile.taropen
 
     def test_no_name_argument(self):
-        fobj = open(self.tarname, "rb")
-        tar = tarfile.open(fileobj=fobj, mode=self.mode)
-        self.assertEqual(tar.name, os.path.abspath(fobj.name))
+        with open(self.tarname, "rb") as fobj:
+            tar = tarfile.open(fileobj=fobj, mode=self.mode)
+            self.assertEqual(tar.name, os.path.abspath(fobj.name))
 
     def test_no_name_attribute(self):
-        data = open(self.tarname, "rb").read()
+        with open(self.tarname, "rb") as fobj:
+            data = fobj.read()
         fobj = StringIO.StringIO(data)
         self.assertRaises(AttributeError, getattr, fobj, "name")
         tar = tarfile.open(fileobj=fobj, mode=self.mode)
         self.assertEqual(tar.name, None)
 
     def test_empty_name_attribute(self):
-        data = open(self.tarname, "rb").read()
+        with open(self.tarname, "rb") as fobj:
+            data = fobj.read()
         fobj = StringIO.StringIO(data)
         fobj.name = ""
         tar = tarfile.open(fileobj=fobj, mode=self.mode)
@@ -323,12 +354,14 @@
         # Skip the first member and store values from the second member
         # of the testtar.
         tar = tarfile.open(self.tarname, mode=self.mode)
-        tar.next()
-        t = tar.next()
-        name = t.name
-        offset = t.offset
-        data = tar.extractfile(t).read()
-        tar.close()
+        try:
+            tar.next()
+            t = tar.next()
+            name = t.name
+            offset = t.offset
+            data = tar.extractfile(t).read()
+        finally:
+            tar.close()
 
         # Open the testtar and seek to the offset of the second member.
         if self.mode.endswith(":gz"):
@@ -338,26 +371,30 @@
         else:
             _open = open
         fobj = _open(self.tarname, "rb")
-        fobj.seek(offset)
+        try:
+            fobj.seek(offset)
 
-        # Test if the tarfile starts with the second member.
-        tar = tar.open(self.tarname, mode="r:", fileobj=fobj)
-        t = tar.next()
-        self.assertEqual(t.name, name)
-        # Read to the end of fileobj and test if seeking back to the
-        # beginning works.
-        tar.getmembers()
-        self.assertEqual(tar.extractfile(t).read(), data,
-                "seek back did not work")
-        tar.close()
+            # Test if the tarfile starts with the second member.
+            tar = tar.open(self.tarname, mode="r:", fileobj=fobj)
+            t = tar.next()
+            self.assertEqual(t.name, name)
+            # Read to the end of fileobj and test if seeking back to the
+            # beginning works.
+            tar.getmembers()
+            self.assertEqual(tar.extractfile(t).read(), data,
+                    "seek back did not work")
+            tar.close()
+        finally:
+            fobj.close()
 
     def test_fail_comp(self):
         # For Gzip and Bz2 Tests: fail with a ReadError on an uncompressed file.
         if self.mode == "r:":
             self.skipTest('needs a gz or bz2 mode')
         self.assertRaises(tarfile.ReadError, tarfile.open, tarname, self.mode)
-        fobj = open(tarname, "rb")
-        self.assertRaises(tarfile.ReadError, tarfile.open, fileobj=fobj, mode=self.mode)
+        with open(tarname, "rb") as fobj:
+            self.assertRaises(tarfile.ReadError, tarfile.open,
+                              fileobj=fobj, mode=self.mode)
 
     def test_v7_dirtype(self):
         # Test old style dirtype member (bug #1336623):
@@ -411,22 +448,25 @@
         # Test if extractall() correctly restores directory permissions
         # and times (see issue1735).
         tar = tarfile.open(tarname, encoding="iso8859-1")
-        directories = [t for t in tar if t.isdir()]
-        tar.extractall(TEMPDIR, directories)
-        for tarinfo in directories:
-            path = os.path.join(TEMPDIR, tarinfo.name)
-            if sys.platform != "win32":
-                # Win32 has no support for fine grained permissions.
-                self.assertEqual(tarinfo.mode & 0777, os.stat(path).st_mode & 0777)
-            self.assertEqual(tarinfo.mtime, os.path.getmtime(path))
-        tar.close()
+        try:
+            directories = [t for t in tar if t.isdir()]
+            tar.extractall(TEMPDIR, directories)
+            for tarinfo in directories:
+                path = os.path.join(TEMPDIR, tarinfo.name)
+                if sys.platform != "win32":
+                    # Win32 has no support for fine grained permissions.
+                    self.assertEqual(tarinfo.mode & 0777, os.stat(path).st_mode & 0777)
+                self.assertEqual(tarinfo.mtime, os.path.getmtime(path))
+        finally:
+            tar.close()
 
     def test_init_close_fobj(self):
         # Issue #7341: Close the internal file object in the TarFile
         # constructor in case of an error. For the test we rely on
         # the fact that opening an empty file raises a ReadError.
         empty = os.path.join(TEMPDIR, "empty")
-        open(empty, "wb").write("")
+        with open(empty, "wb") as fobj:
+            fobj.write("")
 
         try:
             tar = object.__new__(tarfile.TarFile)
@@ -437,7 +477,7 @@
             else:
                 self.fail("ReadError not raised")
         finally:
-            os.remove(empty)
+            support.unlink(empty)
 
     def test_parallel_iteration(self):
         # Issue #16601: Restarting iteration over tarfile continued
@@ -466,42 +506,47 @@
 
     def test_compare_members(self):
         tar1 = tarfile.open(tarname, encoding="iso8859-1")
-        tar2 = self.tar
+        try:
+            tar2 = self.tar
 
-        while True:
-            t1 = tar1.next()
-            t2 = tar2.next()
-            if t1 is None:
-                break
-            self.assertTrue(t2 is not None, "stream.next() failed.")
+            while True:
+                t1 = tar1.next()
+                t2 = tar2.next()
+                if t1 is None:
+                    break
+                self.assertTrue(t2 is not None, "stream.next() failed.")
 
-            if t2.islnk() or t2.issym():
-                self.assertRaises(tarfile.StreamError, tar2.extractfile, t2)
-                continue
+                if t2.islnk() or t2.issym():
+                    self.assertRaises(tarfile.StreamError, tar2.extractfile, t2)
+                    continue
 
-            v1 = tar1.extractfile(t1)
-            v2 = tar2.extractfile(t2)
-            if v1 is None:
-                continue
-            self.assertTrue(v2 is not None, "stream.extractfile() failed")
-            self.assertTrue(v1.read() == v2.read(), "stream extraction failed")
-
-        tar1.close()
+                v1 = tar1.extractfile(t1)
+                v2 = tar2.extractfile(t2)
+                if v1 is None:
+                    continue
+                self.assertTrue(v2 is not None, "stream.extractfile() failed")
+                self.assertTrue(v1.read() == v2.read(), "stream extraction failed")
+        finally:
+            tar1.close()
 
 
 class DetectReadTest(unittest.TestCase):
 
     def _testfunc_file(self, name, mode):
         try:
-            tarfile.open(name, mode)
+            tar = tarfile.open(name, mode)
         except tarfile.ReadError:
             self.fail()
+        else:
+            tar.close()
 
     def _testfunc_fileobj(self, name, mode):
         try:
-            tarfile.open(name, mode, fileobj=open(name, "rb"))
+            tar = tarfile.open(name, mode, fileobj=open(name, "rb"))
         except tarfile.ReadError:
             self.fail()
+        else:
+            tar.close()
 
     def _test_modes(self, testfunc):
         testfunc(tarname, "r")
@@ -615,7 +660,7 @@
         self._test_member(tarinfo, size=86016, chksum=md5_sparse)
 
     def test_find_umlauts(self):
-        tarinfo = self.tar.getmember("ustar/umlauts-ÄÖÜäöüß")
+        tarinfo = self.tar.getmember("ustar/umlauts-\xc4\xd6\xdc\xe4\xf6\xfc\xdf")
         self._test_member(tarinfo, size=7011, chksum=md5_regtype)
 
     def test_find_ustar_longname(self):
@@ -628,7 +673,7 @@
 
     def test_find_pax_umlauts(self):
         self.tar = tarfile.open(self.tarname, mode=self.mode, encoding="iso8859-1")
-        tarinfo = self.tar.getmember("pax/umlauts-ÄÖÜäöüß")
+        tarinfo = self.tar.getmember("pax/umlauts-\xc4\xd6\xdc\xe4\xf6\xfc\xdf")
         self._test_member(tarinfo, size=7011, chksum=md5_regtype)
 
 
@@ -692,33 +737,39 @@
 
     def test_pax_global_headers(self):
         tar = tarfile.open(tarname, encoding="iso8859-1")
+        try:
 
-        tarinfo = tar.getmember("pax/regtype1")
-        self.assertEqual(tarinfo.uname, "foo")
-        self.assertEqual(tarinfo.gname, "bar")
-        self.assertEqual(tarinfo.pax_headers.get("VENDOR.umlauts"), u"ÄÖÜäöüß")
+            tarinfo = tar.getmember("pax/regtype1")
+            self.assertEqual(tarinfo.uname, "foo")
+            self.assertEqual(tarinfo.gname, "bar")
+            self.assertEqual(tarinfo.pax_headers.get("VENDOR.umlauts"), u"\xc4\xd6\xdc\xe4\xf6\xfc\xdf")
 
-        tarinfo = tar.getmember("pax/regtype2")
-        self.assertEqual(tarinfo.uname, "")
-        self.assertEqual(tarinfo.gname, "bar")
-        self.assertEqual(tarinfo.pax_headers.get("VENDOR.umlauts"), u"ÄÖÜäöüß")
+            tarinfo = tar.getmember("pax/regtype2")
+            self.assertEqual(tarinfo.uname, "")
+            self.assertEqual(tarinfo.gname, "bar")
+            self.assertEqual(tarinfo.pax_headers.get("VENDOR.umlauts"), u"\xc4\xd6\xdc\xe4\xf6\xfc\xdf")
 
-        tarinfo = tar.getmember("pax/regtype3")
-        self.assertEqual(tarinfo.uname, "tarfile")
-        self.assertEqual(tarinfo.gname, "tarfile")
-        self.assertEqual(tarinfo.pax_headers.get("VENDOR.umlauts"), u"ÄÖÜäöüß")
+            tarinfo = tar.getmember("pax/regtype3")
+            self.assertEqual(tarinfo.uname, "tarfile")
+            self.assertEqual(tarinfo.gname, "tarfile")
+            self.assertEqual(tarinfo.pax_headers.get("VENDOR.umlauts"), u"\xc4\xd6\xdc\xe4\xf6\xfc\xdf")
+        finally:
+            tar.close()
 
     def test_pax_number_fields(self):
         # All following number fields are read from the pax header.
         tar = tarfile.open(tarname, encoding="iso8859-1")
-        tarinfo = tar.getmember("pax/regtype4")
-        self.assertEqual(tarinfo.size, 7011)
-        self.assertEqual(tarinfo.uid, 123)
-        self.assertEqual(tarinfo.gid, 123)
-        self.assertEqual(tarinfo.mtime, 1041808783.0)
-        self.assertEqual(type(tarinfo.mtime), float)
-        self.assertEqual(float(tarinfo.pax_headers["atime"]), 1041808783.0)
-        self.assertEqual(float(tarinfo.pax_headers["ctime"]), 1041808783.0)
+        try:
+            tarinfo = tar.getmember("pax/regtype4")
+            self.assertEqual(tarinfo.size, 7011)
+            self.assertEqual(tarinfo.uid, 123)
+            self.assertEqual(tarinfo.gid, 123)
+            self.assertEqual(tarinfo.mtime, 1041808783.0)
+            self.assertEqual(type(tarinfo.mtime), float)
+            self.assertEqual(float(tarinfo.pax_headers["atime"]), 1041808783.0)
+            self.assertEqual(float(tarinfo.pax_headers["ctime"]), 1041808783.0)
+        finally:
+            tar.close()
 
 
 class WriteTestBase(unittest.TestCase):
@@ -750,52 +801,60 @@
         # a trailing '\0'.
         name = "0123456789" * 10
         tar = tarfile.open(tmpname, self.mode)
-        t = tarfile.TarInfo(name)
-        tar.addfile(t)
-        tar.close()
+        try:
+            t = tarfile.TarInfo(name)
+            tar.addfile(t)
+        finally:
+            tar.close()
 
         tar = tarfile.open(tmpname)
-        self.assertTrue(tar.getnames()[0] == name,
-                "failed to store 100 char filename")
-        tar.close()
+        try:
+            self.assertTrue(tar.getnames()[0] == name,
+                    "failed to store 100 char filename")
+        finally:
+            tar.close()
 
     def test_tar_size(self):
         # Test for bug #1013882.
         tar = tarfile.open(tmpname, self.mode)
-        path = os.path.join(TEMPDIR, "file")
-        fobj = open(path, "wb")
-        fobj.write("aaa")
-        fobj.close()
-        tar.add(path)
-        tar.close()
+        try:
+            path = os.path.join(TEMPDIR, "file")
+            with open(path, "wb") as fobj:
+                fobj.write("aaa")
+            tar.add(path)
+        finally:
+            tar.close()
         self.assertTrue(os.path.getsize(tmpname) > 0,
                 "tarfile is empty")
 
     # The test_*_size tests test for bug #1167128.
     def test_file_size(self):
         tar = tarfile.open(tmpname, self.mode)
+        try:
 
-        path = os.path.join(TEMPDIR, "file")
-        fobj = open(path, "wb")
-        fobj.close()
-        tarinfo = tar.gettarinfo(path)
-        self.assertEqual(tarinfo.size, 0)
+            path = os.path.join(TEMPDIR, "file")
+            with open(path, "wb"):
+                pass
+            tarinfo = tar.gettarinfo(path)
+            self.assertEqual(tarinfo.size, 0)
 
-        fobj = open(path, "wb")
-        fobj.write("aaa")
-        fobj.close()
-        tarinfo = tar.gettarinfo(path)
-        self.assertEqual(tarinfo.size, 3)
-
-        tar.close()
+            with open(path, "wb") as fobj:
+                fobj.write("aaa")
+            tarinfo = tar.gettarinfo(path)
+            self.assertEqual(tarinfo.size, 3)
+        finally:
+            tar.close()
 
     def test_directory_size(self):
         path = os.path.join(TEMPDIR, "directory")
         os.mkdir(path)
         try:
             tar = tarfile.open(tmpname, self.mode)
-            tarinfo = tar.gettarinfo(path)
-            self.assertEqual(tarinfo.size, 0)
+            try:
+                tarinfo = tar.gettarinfo(path)
+                self.assertEqual(tarinfo.size, 0)
+            finally:
+                tar.close()
         finally:
             os.rmdir(path)
 
@@ -803,16 +862,18 @@
         if hasattr(os, "link"):
             link = os.path.join(TEMPDIR, "link")
             target = os.path.join(TEMPDIR, "link_target")
-            fobj = open(target, "wb")
-            fobj.write("aaa")
-            fobj.close()
+            with open(target, "wb") as fobj:
+                fobj.write("aaa")
             os.link(target, link)
             try:
                 tar = tarfile.open(tmpname, self.mode)
-                # Record the link target in the inodes list.
-                tar.gettarinfo(target)
-                tarinfo = tar.gettarinfo(link)
-                self.assertEqual(tarinfo.size, 0)
+                try:
+                    # Record the link target in the inodes list.
+                    tar.gettarinfo(target)
+                    tarinfo = tar.gettarinfo(link)
+                    self.assertEqual(tarinfo.size, 0)
+                finally:
+                    tar.close()
             finally:
                 os.remove(target)
                 os.remove(link)
@@ -823,26 +884,30 @@
             os.symlink("link_target", path)
             try:
                 tar = tarfile.open(tmpname, self.mode)
-                tarinfo = tar.gettarinfo(path)
-                self.assertEqual(tarinfo.size, 0)
+                try:
+                    tarinfo = tar.gettarinfo(path)
+                    self.assertEqual(tarinfo.size, 0)
+                finally:
+                    tar.close()
             finally:
                 os.remove(path)
 
     def test_add_self(self):
         # Test for #1257255.
         dstname = os.path.abspath(tmpname)
-
         tar = tarfile.open(tmpname, self.mode)
-        self.assertTrue(tar.name == dstname, "archive name must be absolute")
+        try:
+            self.assertTrue(tar.name == dstname, "archive name must be absolute")
+            tar.add(dstname)
+            self.assertTrue(tar.getnames() == [], "added the archive to itself")
 
-        tar.add(dstname)
-        self.assertTrue(tar.getnames() == [], "added the archive to itself")
-
-        cwd = os.getcwd()
-        os.chdir(TEMPDIR)
-        tar.add(dstname)
-        os.chdir(cwd)
-        self.assertTrue(tar.getnames() == [], "added the archive to itself")
+            cwd = os.getcwd()
+            os.chdir(TEMPDIR)
+            tar.add(dstname)
+            os.chdir(cwd)
+            self.assertTrue(tar.getnames() == [], "added the archive to itself")
+        finally:
+            tar.close()
 
     def test_exclude(self):
         tempdir = os.path.join(TEMPDIR, "exclude")
@@ -855,14 +920,19 @@
             exclude = os.path.isfile
 
             tar = tarfile.open(tmpname, self.mode, encoding="iso8859-1")
-            with test_support.check_warnings(("use the filter argument",
-                                              DeprecationWarning)):
-                tar.add(tempdir, arcname="empty_dir", exclude=exclude)
-            tar.close()
+            try:
+                with test_support.check_warnings(("use the filter argument",
+                                                DeprecationWarning)):
+                    tar.add(tempdir, arcname="empty_dir", exclude=exclude)
+            finally:
+                tar.close()
 
             tar = tarfile.open(tmpname, "r")
-            self.assertEqual(len(tar.getmembers()), 1)
-            self.assertEqual(tar.getnames()[0], "empty_dir")
+            try:
+                self.assertEqual(len(tar.getmembers()), 1)
+                self.assertEqual(tar.getnames()[0], "empty_dir")
+            finally:
+                tar.close()
         finally:
             shutil.rmtree(tempdir)
 
@@ -882,15 +952,19 @@
                 return tarinfo
 
             tar = tarfile.open(tmpname, self.mode, encoding="iso8859-1")
-            tar.add(tempdir, arcname="empty_dir", filter=filter)
-            tar.close()
+            try:
+                tar.add(tempdir, arcname="empty_dir", filter=filter)
+            finally:
+                tar.close()
 
             tar = tarfile.open(tmpname, "r")
-            for tarinfo in tar:
-                self.assertEqual(tarinfo.uid, 123)
-                self.assertEqual(tarinfo.uname, "foo")
-            self.assertEqual(len(tar.getmembers()), 3)
-            tar.close()
+            try:
+                for tarinfo in tar:
+                    self.assertEqual(tarinfo.uid, 123)
+                    self.assertEqual(tarinfo.uname, "foo")
+                self.assertEqual(len(tar.getmembers()), 3)
+            finally:
+                tar.close()
         finally:
             shutil.rmtree(tempdir)
 
@@ -908,12 +982,16 @@
             os.mkdir(foo)
 
         tar = tarfile.open(tmpname, self.mode)
-        tar.add(foo, arcname=path)
-        tar.close()
+        try:
+            tar.add(foo, arcname=path)
+        finally:
+            tar.close()
 
         tar = tarfile.open(tmpname, "r")
-        t = tar.next()
-        tar.close()
+        try:
+            t = tar.next()
+        finally:
+            tar.close()
 
         if not dir:
             os.remove(foo)
@@ -948,21 +1026,19 @@
 
     def test_cwd(self):
         # Test adding the current working directory.
-        cwd = os.getcwd()
-        os.chdir(TEMPDIR)
-        try:
-            open("foo", "w").close()
-
+        with support.change_cwd(TEMPDIR):
             tar = tarfile.open(tmpname, self.mode)
-            tar.add(".")
-            tar.close()
+            try:
+                tar.add(".")
+            finally:
+                tar.close()
 
             tar = tarfile.open(tmpname, "r")
-            for t in tar:
-                self.assertTrue(t.name == "." or t.name.startswith("./"))
-            tar.close()
-        finally:
-            os.chdir(cwd)
+            try:
+                for t in tar:
+                    self.assertTrue(t.name == "." or t.name.startswith("./"))
+            finally:
+                tar.close()
 
     @unittest.skipUnless(hasattr(os, 'symlink'), "needs os.symlink")
     def test_extractall_symlinks(self):
@@ -1079,19 +1155,18 @@
         tar.close()
 
         if self.mode.endswith("gz"):
-            fobj = gzip.GzipFile(tmpname)
-            data = fobj.read()
-            fobj.close()
+            with gzip.GzipFile(tmpname) as fobj:
+                data = fobj.read()
         elif self.mode.endswith("bz2"):
             dec = bz2.BZ2Decompressor()
-            data = open(tmpname, "rb").read()
+            with open(tmpname, "rb") as fobj:
+                data = fobj.read()
             data = dec.decompress(data)
             self.assertTrue(len(dec.unused_data) == 0,
                     "found trailing data")
         else:
-            fobj = open(tmpname, "rb")
-            data = fobj.read()
-            fobj.close()
+            with open(tmpname, "rb") as fobj:
+                data = fobj.read()
 
         self.assertTrue(data.count("\0") == tarfile.RECORDSIZE,
                          "incorrect zero padding")
@@ -1152,23 +1227,27 @@
             tarinfo.type = tarfile.LNKTYPE
 
         tar = tarfile.open(tmpname, "w")
-        tar.format = tarfile.GNU_FORMAT
-        tar.addfile(tarinfo)
+        try:
+            tar.format = tarfile.GNU_FORMAT
+            tar.addfile(tarinfo)
 
-        v1 = self._calc_size(name, link)
-        v2 = tar.offset
-        self.assertTrue(v1 == v2, "GNU longname/longlink creation failed")
-
-        tar.close()
+            v1 = self._calc_size(name, link)
+            v2 = tar.offset
+            self.assertTrue(v1 == v2, "GNU longname/longlink creation failed")
+        finally:
+            tar.close()
 
         tar = tarfile.open(tmpname)
-        member = tar.next()
-        self.assertIsNotNone(member,
-                "unable to read longname member")
-        self.assertEqual(tarinfo.name, member.name,
-                "unable to read longname member")
-        self.assertEqual(tarinfo.linkname, member.linkname,
-                "unable to read longname member")
+        try:
+            member = tar.next()
+            self.assertIsNotNone(member,
+                    "unable to read longname member")
+            self.assertEqual(tarinfo.name, member.name,
+                    "unable to read longname member")
+            self.assertEqual(tarinfo.linkname, member.linkname,
+                    "unable to read longname member")
+        finally:
+            tar.close()
 
     def test_longname_1023(self):
         self._test(("longnam/" * 127) + "longnam")
@@ -1208,9 +1287,8 @@
         self.foo = os.path.join(TEMPDIR, "foo")
         self.bar = os.path.join(TEMPDIR, "bar")
 
-        fobj = open(self.foo, "wb")
-        fobj.write("foo")
-        fobj.close()
+        with open(self.foo, "wb") as fobj:
+            fobj.write("foo")
 
         os.link(self.foo, self.bar)
 
@@ -1219,8 +1297,8 @@
 
     def tearDown(self):
         self.tar.close()
-        os.remove(self.foo)
-        os.remove(self.bar)
+        support.unlink(self.foo)
+        support.unlink(self.bar)
 
     def test_add_twice(self):
         # The same name will be added as a REGTYPE every
@@ -1251,44 +1329,54 @@
             tarinfo.type = tarfile.LNKTYPE
 
         tar = tarfile.open(tmpname, "w", format=tarfile.PAX_FORMAT)
-        tar.addfile(tarinfo)
-        tar.close()
+        try:
+            tar.addfile(tarinfo)
+        finally:
+            tar.close()
 
         tar = tarfile.open(tmpname)
-        if link:
-            l = tar.getmembers()[0].linkname
-            self.assertTrue(link == l, "PAX longlink creation failed")
-        else:
-            n = tar.getmembers()[0].name
-            self.assertTrue(name == n, "PAX longname creation failed")
+        try:
+            if link:
+                l = tar.getmembers()[0].linkname
+                self.assertTrue(link == l, "PAX longlink creation failed")
+            else:
+                n = tar.getmembers()[0].name
+                self.assertTrue(name == n, "PAX longname creation failed")
+        finally:
+            tar.close()
 
     def test_pax_global_header(self):
         pax_headers = {
                 u"foo": u"bar",
                 u"uid": u"0",
                 u"mtime": u"1.23",
-                u"test": u"äöü",
-                u"äöü": u"test"}
+                u"test": u"\xe4\xf6\xfc",
+                u"\xe4\xf6\xfc": u"test"}
 
         tar = tarfile.open(tmpname, "w", format=tarfile.PAX_FORMAT,
                 pax_headers=pax_headers)
-        tar.addfile(tarfile.TarInfo("test"))
-        tar.close()
+        try:
+            tar.addfile(tarfile.TarInfo("test"))
+        finally:
+            tar.close()
 
         # Test if the global header was written correctly.
         tar = tarfile.open(tmpname, encoding="iso8859-1")
-        self.assertEqual(tar.pax_headers, pax_headers)
-        self.assertEqual(tar.getmembers()[0].pax_headers, pax_headers)
+        try:
+            self.assertEqual(tar.pax_headers, pax_headers)
+            self.assertEqual(tar.getmembers()[0].pax_headers, pax_headers)
 
-        # Test if all the fields are unicode.
-        for key, val in tar.pax_headers.iteritems():
-            self.assertTrue(type(key) is unicode)
-            self.assertTrue(type(val) is unicode)
-            if key in tarfile.PAX_NUMBER_FIELDS:
-                try:
-                    tarfile.PAX_NUMBER_FIELDS[key](val)
-                except (TypeError, ValueError):
-                    self.fail("unable to convert pax header field")
+            # Test if all the fields are unicode.
+            for key, val in tar.pax_headers.iteritems():
+                self.assertTrue(type(key) is unicode)
+                self.assertTrue(type(val) is unicode)
+                if key in tarfile.PAX_NUMBER_FIELDS:
+                    try:
+                        tarfile.PAX_NUMBER_FIELDS[key](val)
+                    except (TypeError, ValueError):
+                        self.fail("unable to convert pax header field")
+        finally:
+            tar.close()
 
     def test_pax_extended_header(self):
         # The fields from the pax header have priority over the
@@ -1296,18 +1384,23 @@
         pax_headers = {u"path": u"foo", u"uid": u"123"}
 
         tar = tarfile.open(tmpname, "w", format=tarfile.PAX_FORMAT, encoding="iso8859-1")
-        t = tarfile.TarInfo()
-        t.name = u"äöü"     # non-ASCII
-        t.uid = 8**8        # too large
-        t.pax_headers = pax_headers
-        tar.addfile(t)
-        tar.close()
+        try:
+            t = tarfile.TarInfo()
+            t.name = u"\xe4\xf6\xfc"     # non-ASCII
+            t.uid = 8**8        # too large
+            t.pax_headers = pax_headers
+            tar.addfile(t)
+        finally:
+            tar.close()
 
         tar = tarfile.open(tmpname, encoding="iso8859-1")
-        t = tar.getmembers()[0]
-        self.assertEqual(t.pax_headers, pax_headers)
-        self.assertEqual(t.name, "foo")
-        self.assertEqual(t.uid, 123)
+        try:
+            t = tar.getmembers()[0]
+            self.assertEqual(t.pax_headers, pax_headers)
+            self.assertEqual(t.name, "foo")
+            self.assertEqual(t.uid, 123)
+        finally:
+            tar.close()
 
 
 class UstarUnicodeTest(unittest.TestCase):
@@ -1326,57 +1419,68 @@
 
     def _test_unicode_filename(self, encoding):
         tar = tarfile.open(tmpname, "w", format=self.format, encoding=encoding, errors="strict")
-        name = u"äöü"
-        tar.addfile(tarfile.TarInfo(name))
-        tar.close()
+        try:
+            name = u"\xe4\xf6\xfc"
+            tar.addfile(tarfile.TarInfo(name))
+        finally:
+            tar.close()
 
         tar = tarfile.open(tmpname, encoding=encoding)
-        self.assertTrue(type(tar.getnames()[0]) is not unicode)
-        self.assertEqual(tar.getmembers()[0].name, name.encode(encoding))
-        tar.close()
+        try:
+            self.assertTrue(type(tar.getnames()[0]) is not unicode)
+            self.assertEqual(tar.getmembers()[0].name, name.encode(encoding))
+        finally:
+            tar.close()
 
     def test_unicode_filename_error(self):
         tar = tarfile.open(tmpname, "w", format=self.format, encoding="ascii", errors="strict")
-        tarinfo = tarfile.TarInfo()
+        try:
+            tarinfo = tarfile.TarInfo()
 
-        tarinfo.name = "äöü"
-        if self.format == tarfile.PAX_FORMAT:
+            tarinfo.name = "\xe4\xf6\xfc"
+            if self.format == tarfile.PAX_FORMAT:
+                self.assertRaises(UnicodeError, tar.addfile, tarinfo)
+            else:
+                tar.addfile(tarinfo)
+
+            tarinfo.name = u"\xe4\xf6\xfc"
             self.assertRaises(UnicodeError, tar.addfile, tarinfo)
-        else:
-            tar.addfile(tarinfo)
 
-        tarinfo.name = u"äöü"
-        self.assertRaises(UnicodeError, tar.addfile, tarinfo)
-
-        tarinfo.name = "foo"
-        tarinfo.uname = u"äöü"
-        self.assertRaises(UnicodeError, tar.addfile, tarinfo)
+            tarinfo.name = "foo"
+            tarinfo.uname = u"\xe4\xf6\xfc"
+            self.assertRaises(UnicodeError, tar.addfile, tarinfo)
+        finally:
+            tar.close()
 
     def test_unicode_argument(self):
         tar = tarfile.open(tarname, "r", encoding="iso8859-1", errors="strict")
-        for t in tar:
-            self.assertTrue(type(t.name) is str)
-            self.assertTrue(type(t.linkname) is str)
-            self.assertTrue(type(t.uname) is str)
-            self.assertTrue(type(t.gname) is str)
-        tar.close()
+        try:
+            for t in tar:
+                self.assertTrue(type(t.name) is str)
+                self.assertTrue(type(t.linkname) is str)
+                self.assertTrue(type(t.uname) is str)
+                self.assertTrue(type(t.gname) is str)
+        finally:
+            tar.close()
 
     def test_uname_unicode(self):
-        for name in (u"äöü", "äöü"):
+        for name in (u"\xe4\xf6\xfc", "\xe4\xf6\xfc"):
             t = tarfile.TarInfo("foo")
             t.uname = name
             t.gname = name
 
             fobj = StringIO.StringIO()
             tar = tarfile.open("foo.tar", mode="w", fileobj=fobj, format=self.format, encoding="iso8859-1")
-            tar.addfile(t)
-            tar.close()
+            try:
+                tar.addfile(t)
+            finally:
+                tar.close()
             fobj.seek(0)
 
             tar = tarfile.open("foo.tar", fileobj=fobj, encoding="iso8859-1")
             t = tar.getmember("foo")
-            self.assertEqual(t.uname, "äöü")
-            self.assertEqual(t.gname, "äöü")
+            self.assertEqual(t.uname, "\xe4\xf6\xfc")
+            self.assertEqual(t.gname, "\xe4\xf6\xfc")
 
 
 class GNUUnicodeTest(UstarUnicodeTest):
@@ -1398,9 +1502,9 @@
     def test_error_handlers(self):
         # Test if the unicode error handlers work correctly for characters
         # that cannot be expressed in a given encoding.
-        self._create_unicode_name(u"äöü")
+        self._create_unicode_name(u"\xe4\xf6\xfc")
 
-        for handler, name in (("utf-8", u"äöü".encode("utf8")),
+        for handler, name in (("utf-8", u"\xe4\xf6\xfc".encode("utf8")),
                     ("replace", "???"), ("ignore", "")):
             tar = tarfile.open(tmpname, format=self.format, encoding="ascii",
                     errors=handler)
@@ -1412,11 +1516,11 @@
     def test_error_handler_utf8(self):
         # Create a pathname that has one component representable using
         # iso8859-1 and the other only in iso8859-15.
-        self._create_unicode_name(u"äöü/€")
+        self._create_unicode_name(u"\xe4\xf6\xfc/\u20ac")
 
         tar = tarfile.open(tmpname, format=self.format, encoding="iso8859-1",
                 errors="utf-8")
-        self.assertEqual(tar.getnames()[0], "äöü/" + u"€".encode("utf8"))
+        self.assertEqual(tar.getnames()[0], "\xe4\xf6\xfc/" + u"\u20ac".encode("utf8"))
 
 
 class AppendTest(unittest.TestCase):
@@ -1428,22 +1532,20 @@
             os.remove(self.tarname)
 
     def _add_testfile(self, fileobj=None):
-        tar = tarfile.open(self.tarname, "a", fileobj=fileobj)
-        tar.addfile(tarfile.TarInfo("bar"))
-        tar.close()
+        with tarfile.open(self.tarname, "a", fileobj=fileobj) as tar:
+            tar.addfile(tarfile.TarInfo("bar"))
 
     def _create_testtar(self, mode="w:"):
-        src = tarfile.open(tarname, encoding="iso8859-1")
-        t = src.getmember("ustar/regtype")
-        t.name = "foo"
-        f = src.extractfile(t)
-        tar = tarfile.open(self.tarname, mode)
-        tar.addfile(t, f)
-        tar.close()
+        with tarfile.open(tarname, encoding="iso8859-1") as src:
+            t = src.getmember("ustar/regtype")
+            t.name = "foo"
+            f = src.extractfile(t)
+            with tarfile.open(self.tarname, mode) as tar:
+                tar.addfile(t, f)
 
     def _test(self, names=["bar"], fileobj=None):
-        tar = tarfile.open(self.tarname, fileobj=fileobj)
-        self.assertEqual(tar.getnames(), names)
+        with tarfile.open(self.tarname, fileobj=fileobj) as tar:
+            self.assertEqual(tar.getnames(), names)
 
     def test_non_existing(self):
         self._add_testfile()
@@ -1462,7 +1564,8 @@
 
     def test_fileobj(self):
         self._create_testtar()
-        data = open(self.tarname).read()
+        with open(self.tarname) as fobj:
+            data = fobj.read()
         fobj = StringIO.StringIO(data)
         self._add_testfile(fobj)
         fobj.seek(0)
@@ -1486,7 +1589,8 @@
     # Append mode is supposed to fail if the tarfile to append to
     # does not end with a zero block.
     def _test_error(self, data):
-        open(self.tarname, "wb").write(data)
+        with open(self.tarname, "wb") as fobj:
+            fobj.write(data)
         self.assertRaises(tarfile.ReadError, self._add_testfile)
 
     def test_null(self):
@@ -1566,6 +1670,14 @@
         tarinfo.tobuf(tarfile.PAX_FORMAT)
 
 
+class MiscTest(unittest.TestCase):
+
+    def test_read_number_fields(self):
+        # Issue 24514: Test if empty number fields are converted to zero.
+        self.assertEqual(tarfile.nti("\0"), 0)
+        self.assertEqual(tarfile.nti("       \0"), 0)
+
+
 class ContextManagerTest(unittest.TestCase):
 
     def test_basic(self):
@@ -1614,15 +1726,14 @@
     def test_fileobj(self):
         # Test that __exit__() did not close the external file
         # object.
-        fobj = open(tmpname, "wb")
-        try:
-            with tarfile.open(fileobj=fobj, mode="w") as tar:
-                raise Exception
-        except:
-            pass
-        self.assertFalse(fobj.closed, "external file object was closed")
-        self.assertTrue(tar.closed, "context manager failed")
-        fobj.close()
+        with open(tmpname, "wb") as fobj:
+            try:
+                with tarfile.open(fileobj=fobj, mode="w") as tar:
+                    raise Exception
+            except:
+                pass
+            self.assertFalse(fobj.closed, "external file object was closed")
+            self.assertTrue(tar.closed, "context manager failed")
 
 
 class LinkEmulationTest(ReadTest):
@@ -1710,6 +1821,7 @@
 
 
 def test_main():
+    support.unlink(TEMPDIR)
     os.makedirs(TEMPDIR)
 
     tests = [
@@ -1730,6 +1842,7 @@
         PaxUnicodeTest,
         AppendTest,
         LimitsTest,
+        MiscTest,
         ContextManagerTest,
     ]
 
@@ -1738,15 +1851,14 @@
     else:
         tests.append(LinkEmulationTest)
 
-    fobj = open(tarname, "rb")
-    data = fobj.read()
-    fobj.close()
+    with open(tarname, "rb") as fobj:
+        data = fobj.read()
 
     if gzip:
         # Create testtar.tar.gz and add gzip-specific tests.
-        tar = gzip.open(gzipname, "wb")
-        tar.write(data)
-        tar.close()
+        support.unlink(gzipname)
+        with gzip.open(gzipname, "wb") as tar:
+            tar.write(data)
 
         tests += [
             GzipMiscReadTest,
@@ -1759,9 +1871,12 @@
 
     if bz2:
         # Create testtar.tar.bz2 and add bz2-specific tests.
+        support.unlink(bz2name)
         tar = bz2.BZ2File(bz2name, "wb")
-        tar.write(data)
-        tar.close()
+        try:
+            tar.write(data)
+        finally:
+            tar.close()
 
         tests += [
             Bz2MiscReadTest,
diff --git a/lib/python2.7/test/test_tcl.py b/lib/python2.7/test/test_tcl.py
index 9c9afde..4c2e8d5 100644
--- a/lib/python2.7/test/test_tcl.py
+++ b/lib/python2.7/test/test_tcl.py
@@ -8,9 +8,7 @@
 # Skip this test if the _tkinter module wasn't built.
 _tkinter = test_support.import_module('_tkinter')
 
-# Make sure tkinter._fix runs to set up the environment
-tkinter = test_support.import_fresh_module('Tkinter')
-
+import Tkinter as tkinter
 from Tkinter import Tcl
 from _tkinter import TclError
 
diff --git a/lib/python2.7/test/test_tempfile.py b/lib/python2.7/test/test_tempfile.py
index 465bcda..078e4a9 100644
--- a/lib/python2.7/test/test_tempfile.py
+++ b/lib/python2.7/test/test_tempfile.py
@@ -287,7 +287,42 @@
                              lambda: iter(names))
 
 
-class test__mkstemp_inner(TC):
+class TestBadTempdir:
+
+    def test_read_only_directory(self):
+        with _inside_empty_temp_dir():
+            oldmode = mode = os.stat(tempfile.tempdir).st_mode
+            mode &= ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
+            os.chmod(tempfile.tempdir, mode)
+            try:
+                if os.access(tempfile.tempdir, os.W_OK):
+                    self.skipTest("can't set the directory read-only")
+                with self.assertRaises(OSError) as cm:
+                    self.make_temp()
+                self.assertIn(cm.exception.errno, (errno.EPERM, errno.EACCES))
+                self.assertEqual(os.listdir(tempfile.tempdir), [])
+            finally:
+                os.chmod(tempfile.tempdir, oldmode)
+
+    def test_nonexisting_directory(self):
+        with _inside_empty_temp_dir():
+            tempdir = os.path.join(tempfile.tempdir, 'nonexistent')
+            with support.swap_attr(tempfile, 'tempdir', tempdir):
+                with self.assertRaises(OSError) as cm:
+                    self.make_temp()
+                self.assertEqual(cm.exception.errno, errno.ENOENT)
+
+    def test_non_directory(self):
+        with _inside_empty_temp_dir():
+            tempdir = os.path.join(tempfile.tempdir, 'file')
+            open(tempdir, 'wb').close()
+            with support.swap_attr(tempfile, 'tempdir', tempdir):
+                with self.assertRaises(OSError) as cm:
+                    self.make_temp()
+                self.assertIn(cm.exception.errno, (errno.ENOTDIR, errno.ENOENT))
+
+
+class test__mkstemp_inner(TestBadTempdir, TC):
     """Test the internal function _mkstemp_inner."""
 
     class mkstemped:
@@ -400,7 +435,7 @@
         self.do_create(bin=0).write("blat\n")
         # XXX should test that the file really is a text file
 
-    def default_mkstemp_inner(self):
+    def make_temp(self):
         return tempfile._mkstemp_inner(tempfile.gettempdir(),
                                        tempfile.template,
                                        '',
@@ -411,11 +446,11 @@
         # the chosen name already exists
         with _inside_empty_temp_dir(), \
              _mock_candidate_names('aaa', 'aaa', 'bbb'):
-            (fd1, name1) = self.default_mkstemp_inner()
+            (fd1, name1) = self.make_temp()
             os.close(fd1)
             self.assertTrue(name1.endswith('aaa'))
 
-            (fd2, name2) = self.default_mkstemp_inner()
+            (fd2, name2) = self.make_temp()
             os.close(fd2)
             self.assertTrue(name2.endswith('bbb'))
 
@@ -427,7 +462,7 @@
             dir = tempfile.mkdtemp()
             self.assertTrue(dir.endswith('aaa'))
 
-            (fd, name) = self.default_mkstemp_inner()
+            (fd, name) = self.make_temp()
             os.close(fd)
             self.assertTrue(name.endswith('bbb'))
 
@@ -542,9 +577,12 @@
 test_classes.append(test_mkstemp)
 
 
-class test_mkdtemp(TC):
+class test_mkdtemp(TestBadTempdir, TC):
     """Test mkdtemp()."""
 
+    def make_temp(self):
+        return tempfile.mkdtemp()
+
     def do_create(self, dir=None, pre="", suf=""):
         if dir is None:
             dir = tempfile.gettempdir()
@@ -789,6 +827,13 @@
             os.close = old_close
             os.fdopen = old_fdopen
 
+    def test_bad_mode(self):
+        dir = tempfile.mkdtemp()
+        self.addCleanup(support.rmtree, dir)
+        with self.assertRaises(TypeError):
+            tempfile.NamedTemporaryFile(mode=(), dir=dir)
+        self.assertEqual(os.listdir(dir), [])
+
     # How to test the mode and bufsize parameters?
 
 test_classes.append(test_NamedTemporaryFile)
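The make_temp() hook introduced above is what lets TestBadTempdir drive both _mkstemp_inner and mkdtemp; a rough sketch of that mixin shape, with invented class names:

import shutil
import tempfile
import unittest

class TempCreationMixin(object):
    # Concrete test classes supply make_temp(); the shared checks only rely
    # on that hook, mirroring how TestBadTempdir is wired into the suite.
    def make_temp(self):
        raise NotImplementedError

    def test_created_under_default_tempdir(self):
        path = self.make_temp()
        self.addCleanup(shutil.rmtree, path, True)  # ignore_errors=True
        self.assertTrue(path.startswith(tempfile.gettempdir()))

class MkdtempCase(TempCreationMixin, unittest.TestCase):
    def make_temp(self):
        return tempfile.mkdtemp()

if __name__ == "__main__":
    unittest.main()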
diff --git a/lib/python2.7/test/test_textwrap.py b/lib/python2.7/test/test_textwrap.py
index 7b72672..dccf095 100644
--- a/lib/python2.7/test/test_textwrap.py
+++ b/lib/python2.7/test/test_textwrap.py
@@ -647,6 +647,11 @@
         expect = "hello there\n  how are you?"
         self.assertEqual(expect, dedent(text))
 
+        # test margin is smaller than smallest indent
+        text = "  \thello there\n   \thow are you?\n \tI'm fine, thanks"
+        expect = " \thello there\n  \thow are you?\n\tI'm fine, thanks"
+        self.assertEqual(expect, dedent(text))
+
 
 def test_main():
     test_support.run_unittest(WrapTestCase,
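The added dedent() case covers a margin that is shorter than every line's indent; run on its own, the same input behaves like this:

import textwrap

# The common leading whitespace here is a single space, so dedent() strips
# exactly one space from each line and leaves the tabs alone.
text = "  \thello there\n   \thow are you?\n \tI'm fine, thanks"
assert textwrap.dedent(text) == " \thello there\n  \thow are you?\n\tI'm fine, thanks"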
diff --git a/lib/python2.7/test/test_thread.py b/lib/python2.7/test/test_thread.py
index b056039..b466138 100644
--- a/lib/python2.7/test/test_thread.py
+++ b/lib/python2.7/test/test_thread.py
@@ -233,7 +233,12 @@
             if pid == 0: # child
                 os.close(self.read_fd)
                 os.write(self.write_fd, "OK")
-                sys.exit(0)
+                # Exiting the thread normally in the child process can leave
+                # any additional threads (such as the one started by
+                # importing _tkinter) still running, and this can prevent
+                # the half-zombie child process from being cleaned up. See
+                # Issue #26456.
+                os._exit(0)
             else: # parent
                 os.close(self.write_fd)
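A sketch of the fork idiom the new comment describes (POSIX-only, helper name invented): the child leaves through os._exit() so threads inherited from the parent cannot keep it alive.

import os

def run_in_forked_child(fn):
    pid = os.fork()
    if pid == 0:  # child
        try:
            fn()
        finally:
            # Unlike sys.exit(), os._exit() terminates immediately without
            # waiting on other threads or running cleanup handlers.
            os._exit(0)
    else:  # parent
        os.waitpid(pid, 0)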
 
diff --git a/lib/python2.7/test/test_threading.py b/lib/python2.7/test/test_threading.py
index 44f2c57..212bd1a 100644
--- a/lib/python2.7/test/test_threading.py
+++ b/lib/python2.7/test/test_threading.py
@@ -51,7 +51,7 @@
                 self.nrunning.inc()
                 if verbose:
                     print self.nrunning.get(), 'tasks are running'
-                self.testcase.assertTrue(self.nrunning.get() <= 3)
+                self.testcase.assertLessEqual(self.nrunning.get(), 3)
 
             time.sleep(delay)
             if verbose:
@@ -59,7 +59,7 @@
 
             with self.mutex:
                 self.nrunning.dec()
-                self.testcase.assertTrue(self.nrunning.get() >= 0)
+                self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
                 if verbose:
                     print '%s is finished. %d tasks are running' % (
                         self.name, self.nrunning.get())
@@ -92,25 +92,25 @@
         for i in range(NUMTASKS):
             t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
             threads.append(t)
-            self.assertEqual(t.ident, None)
-            self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
+            self.assertIsNone(t.ident)
+            self.assertRegexpMatches(repr(t), r'^<TestThread\(.*, initial\)>$')
             t.start()
 
         if verbose:
             print 'waiting for all tasks to complete'
         for t in threads:
             t.join(NUMTASKS)
-            self.assertTrue(not t.is_alive())
+            self.assertFalse(t.is_alive())
             self.assertNotEqual(t.ident, 0)
-            self.assertFalse(t.ident is None)
-            self.assertTrue(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
+            self.assertIsNotNone(t.ident)
+            self.assertRegexpMatches(repr(t), r'^<TestThread\(.*, \w+ -?\d+\)>$')
         if verbose:
             print 'all tasks done'
         self.assertEqual(numrunning.get(), 0)
 
     def test_ident_of_no_threading_threads(self):
         # The ident still must work for the main thread and dummy threads.
-        self.assertFalse(threading.currentThread().ident is None)
+        self.assertIsNotNone(threading.currentThread().ident)
         def f():
             ident.append(threading.currentThread().ident)
             done.set()
@@ -118,7 +118,7 @@
         ident = []
         thread.start_new_thread(f, ())
         done.wait()
-        self.assertFalse(ident[0] is None)
+        self.assertIsNotNone(ident[0])
         # Kill the "immortal" _DummyThread
         del threading._active[ident[0]]
 
@@ -236,7 +236,7 @@
         self.assertTrue(ret)
         if verbose:
             print "    verifying worker hasn't exited"
-        self.assertTrue(not t.finished)
+        self.assertFalse(t.finished)
         if verbose:
             print "    attempting to raise asynch exception in worker"
         result = set_async_exc(ctypes.c_long(t.id), exception)
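The assertion swaps are mechanical, but the specific assert* methods report both operands on failure instead of a bare "False is not true"; for instance:

import unittest

class AssertionStyle(unittest.TestCase):
    def test_specific_assertions(self):
        running = 2
        # A failure in any of these would name the offending values.
        self.assertLessEqual(running, 3)
        self.assertGreaterEqual(running, 0)
        self.assertIsNotNone(running)
        self.assertFalse(running > 3)

if __name__ == "__main__":
    unittest.main()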
diff --git a/lib/python2.7/test/test_threading_local.py b/lib/python2.7/test/test_threading_local.py
index b161315..e53400c 100644
--- a/lib/python2.7/test/test_threading_local.py
+++ b/lib/python2.7/test/test_threading_local.py
@@ -196,7 +196,7 @@
         wr = weakref.ref(x)
         del x
         gc.collect()
-        self.assertIs(wr(), None)
+        self.assertIsNone(wr())
 
 class PyThreadingLocalTest(unittest.TestCase, BaseLocalTest):
     _local = _threading_local.local
diff --git a/lib/python2.7/test/test_timeit.py b/lib/python2.7/test/test_timeit.py
index a084b68..3a3359c 100644
--- a/lib/python2.7/test/test_timeit.py
+++ b/lib/python2.7/test/test_timeit.py
@@ -120,6 +120,9 @@
     def test_timeit_callable_stmt(self):
         self.timeit(self.fake_callable_stmt, self.fake_setup, number=3)
 
+    def test_timeit_callable_setup(self):
+        self.timeit(self.fake_stmt, self.fake_callable_setup, number=3)
+
     def test_timeit_callable_stmt_and_setup(self):
         self.timeit(self.fake_callable_stmt,
                 self.fake_callable_setup, number=3)
@@ -169,6 +172,10 @@
         self.repeat(self.fake_callable_stmt, self.fake_setup,
                 repeat=3, number=5)
 
+    def test_repeat_callable_setup(self):
+        self.repeat(self.fake_stmt, self.fake_callable_setup,
+                repeat=3, number=5)
+
     def test_repeat_callable_stmt_and_setup(self):
         self.repeat(self.fake_callable_stmt, self.fake_callable_setup,
                 repeat=3, number=5)
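The new cases rely on timeit accepting callables as well as source strings for both the statement and the setup; a minimal example:

import timeit

def setup():
    pass

def stmt():
    return sum(range(10))

t = timeit.Timer(stmt, setup)
print t.timeit(number=3)            # one timing run of 3 calls
print t.repeat(repeat=3, number=5)  # three timing runs of 5 calls each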
diff --git a/lib/python2.7/test/test_tokenize.py b/lib/python2.7/test/test_tokenize.py
index 850aa9c..fd9486b 100644
--- a/lib/python2.7/test/test_tokenize.py
+++ b/lib/python2.7/test/test_tokenize.py
@@ -1,20 +1,42 @@
-doctests = """
-Tests for the tokenize module.
+from test import test_support
+from tokenize import (untokenize, generate_tokens, NUMBER, NAME, OP,
+                     STRING, ENDMARKER, tok_name, Untokenizer, tokenize)
+from StringIO import StringIO
+import os
+from unittest import TestCase
 
-    >>> import glob, random, sys
 
-The tests can be really simple. Given a small fragment of source
-code, print out a table with tokens. The ENDMARKER is omitted for
-brevity.
+class TokenizeTest(TestCase):
+    # Tests for the tokenize module.
 
-    >>> dump_tokens("1 + 1")
+    # The tests can be really simple. Given a small fragment of source
+    # code, print out a table with tokens. The ENDMARKER is omitted for
+    # brevity.
+
+    def check_tokenize(self, s, expected):
+        # Format the tokens in s in a table format.
+        # The ENDMARKER is omitted.
+        result = []
+        f = StringIO(s)
+        for type, token, start, end, line in generate_tokens(f.readline):
+            if type == ENDMARKER:
+                break
+            type = tok_name[type]
+            result.append("    %(type)-10.10s %(token)-13.13r %(start)s %(end)s" %
+                          locals())
+        self.assertEqual(result,
+                         expected.rstrip().splitlines())
+
+
+    def test_basic(self):
+        self.check_tokenize("1 + 1", """\
     NUMBER     '1'           (1, 0) (1, 1)
     OP         '+'           (1, 2) (1, 3)
     NUMBER     '1'           (1, 4) (1, 5)
-
-    >>> dump_tokens("if False:\\n"
-    ...             "    # NL\\n"
-    ...             "    True = False # NEWLINE\\n")
+    """)
+        self.check_tokenize("if False:\n"
+                            "    # NL\n"
+                            "    True = False # NEWLINE\n", """\
     NAME       'if'          (1, 0) (1, 2)
     NAME       'False'       (1, 3) (1, 8)
     OP         ':'           (1, 8) (1, 9)
@@ -28,122 +50,48 @@
     COMMENT    '# NEWLINE'   (3, 17) (3, 26)
     NEWLINE    '\\n'          (3, 26) (3, 27)
     DEDENT     ''            (4, 0) (4, 0)
+    """)
 
-    >>> indent_error_file = \"""
-    ... def k(x):
-    ...     x += 2
-    ...   x += 5
-    ... \"""
+        indent_error_file = """\
+def k(x):
+    x += 2
+  x += 5
+"""
+        with self.assertRaisesRegexp(IndentationError,
+                                     "unindent does not match any "
+                                     "outer indentation level"):
+            for tok in generate_tokens(StringIO(indent_error_file).readline):
+                pass
 
-    >>> for tok in generate_tokens(StringIO(indent_error_file).readline): pass
-    Traceback (most recent call last):
-        ...
-    IndentationError: unindent does not match any outer indentation level
-
-Test roundtrip for `untokenize`. `f` is an open file or a string. The source
-code in f is tokenized, converted back to source code via tokenize.untokenize(),
-and tokenized again from the latter. The test fails if the second tokenization
-doesn't match the first.
-
-    >>> def roundtrip(f):
-    ...     if isinstance(f, str): f = StringIO(f)
-    ...     token_list = list(generate_tokens(f.readline))
-    ...     f.close()
-    ...     tokens1 = [tok[:2] for tok in token_list]
-    ...     new_text = untokenize(tokens1)
-    ...     readline = iter(new_text.splitlines(1)).next
-    ...     tokens2 = [tok[:2] for tok in generate_tokens(readline)]
-    ...     return tokens1 == tokens2
-    ...
-
-There are some standard formatting practices that are easy to get right.
-
-    >>> roundtrip("if x == 1:\\n"
-    ...           "    print x\\n")
-    True
-
-    >>> roundtrip("# This is a comment\\n# This also")
-    True
-
-Some people use different formatting conventions, which makes
-untokenize a little trickier. Note that this test involves trailing
-whitespace after the colon. Note that we use hex escapes to make the
-two trailing blanks apperant in the expected output.
-
-    >>> roundtrip("if x == 1 : \\n"
-    ...           "  print x\\n")
-    True
-
-    >>> f = test_support.findfile("tokenize_tests" + os.extsep + "txt")
-    >>> roundtrip(open(f))
-    True
-
-    >>> roundtrip("if x == 1:\\n"
-    ...           "    # A comment by itself.\\n"
-    ...           "    print x # Comment here, too.\\n"
-    ...           "    # Another comment.\\n"
-    ...           "after_if = True\\n")
-    True
-
-    >>> roundtrip("if (x # The comments need to go in the right place\\n"
-    ...           "    == 1):\\n"
-    ...           "    print 'x==1'\\n")
-    True
-
-    >>> roundtrip("class Test: # A comment here\\n"
-    ...           "  # A comment with weird indent\\n"
-    ...           "  after_com = 5\\n"
-    ...           "  def x(m): return m*5 # a one liner\\n"
-    ...           "  def y(m): # A whitespace after the colon\\n"
-    ...           "     return y*4 # 3-space indent\\n")
-    True
-
-Some error-handling code
-
-    >>> roundtrip("try: import somemodule\\n"
-    ...           "except ImportError: # comment\\n"
-    ...           "    print 'Can not import' # comment2\\n"
-    ...           "else:   print 'Loaded'\\n")
-    True
-
-Balancing continuation
-
-    >>> roundtrip("a = (3,4, \\n"
-    ...           "5,6)\\n"
-    ...           "y = [3, 4,\\n"
-    ...           "5]\\n"
-    ...           "z = {'a': 5,\\n"
-    ...           "'b':15, 'c':True}\\n"
-    ...           "x = len(y) + 5 - a[\\n"
-    ...           "3] - a[2]\\n"
-    ...           "+ len(z) - z[\\n"
-    ...           "'b']\\n")
-    True
-
-Ordinary integers and binary operators
-
-    >>> dump_tokens("0xff <= 255")
+    def test_int(self):
+        # Ordinary integers and binary operators
+        self.check_tokenize("0xff <= 255", """\
     NUMBER     '0xff'        (1, 0) (1, 4)
     OP         '<='          (1, 5) (1, 7)
     NUMBER     '255'         (1, 8) (1, 11)
-    >>> dump_tokens("0b10 <= 255")
+    """)
+        self.check_tokenize("0b10 <= 255", """\
     NUMBER     '0b10'        (1, 0) (1, 4)
     OP         '<='          (1, 5) (1, 7)
     NUMBER     '255'         (1, 8) (1, 11)
-    >>> dump_tokens("0o123 <= 0123")
+    """)
+        self.check_tokenize("0o123 <= 0123", """\
     NUMBER     '0o123'       (1, 0) (1, 5)
     OP         '<='          (1, 6) (1, 8)
     NUMBER     '0123'        (1, 9) (1, 13)
-    >>> dump_tokens("01234567 > ~0x15")
+    """)
+        self.check_tokenize("01234567 > ~0x15", """\
     NUMBER     '01234567'    (1, 0) (1, 8)
     OP         '>'           (1, 9) (1, 10)
     OP         '~'           (1, 11) (1, 12)
     NUMBER     '0x15'        (1, 12) (1, 16)
-    >>> dump_tokens("2134568 != 01231515")
+    """)
+        self.check_tokenize("2134568 != 01231515", """\
     NUMBER     '2134568'     (1, 0) (1, 7)
     OP         '!='          (1, 8) (1, 10)
     NUMBER     '01231515'    (1, 11) (1, 19)
-    >>> dump_tokens("(-124561-1) & 0200000000")
+    """)
+        self.check_tokenize("(-124561-1) & 0200000000", """\
     OP         '('           (1, 0) (1, 1)
     OP         '-'           (1, 1) (1, 2)
     NUMBER     '124561'      (1, 2) (1, 8)
@@ -152,78 +100,93 @@
     OP         ')'           (1, 10) (1, 11)
     OP         '&'           (1, 12) (1, 13)
     NUMBER     '0200000000'  (1, 14) (1, 24)
-    >>> dump_tokens("0xdeadbeef != -1")
+    """)
+        self.check_tokenize("0xdeadbeef != -1", """\
     NUMBER     '0xdeadbeef'  (1, 0) (1, 10)
     OP         '!='          (1, 11) (1, 13)
     OP         '-'           (1, 14) (1, 15)
     NUMBER     '1'           (1, 15) (1, 16)
-    >>> dump_tokens("0xdeadc0de & 012345")
+    """)
+        self.check_tokenize("0xdeadc0de & 012345", """\
     NUMBER     '0xdeadc0de'  (1, 0) (1, 10)
     OP         '&'           (1, 11) (1, 12)
     NUMBER     '012345'      (1, 13) (1, 19)
-    >>> dump_tokens("0xFF & 0x15 | 1234")
+    """)
+        self.check_tokenize("0xFF & 0x15 | 1234", """\
     NUMBER     '0xFF'        (1, 0) (1, 4)
     OP         '&'           (1, 5) (1, 6)
     NUMBER     '0x15'        (1, 7) (1, 11)
     OP         '|'           (1, 12) (1, 13)
     NUMBER     '1234'        (1, 14) (1, 18)
+    """)
 
-Long integers
-
-    >>> dump_tokens("x = 0L")
+    def test_long(self):
+        # Long integers
+        self.check_tokenize("x = 0L", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     NUMBER     '0L'          (1, 4) (1, 6)
-    >>> dump_tokens("x = 0xfffffffffff")
+    """)
+        self.check_tokenize("x = 0xfffffffffff", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     NUMBER     '0xffffffffff (1, 4) (1, 17)
-    >>> dump_tokens("x = 123141242151251616110l")
+    """)
+        self.check_tokenize("x = 123141242151251616110l", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     NUMBER     '123141242151 (1, 4) (1, 26)
-    >>> dump_tokens("x = -15921590215012591L")
+    """)
+        self.check_tokenize("x = -15921590215012591L", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     OP         '-'           (1, 4) (1, 5)
     NUMBER     '159215902150 (1, 5) (1, 23)
+    """)
 
-Floating point numbers
-
-    >>> dump_tokens("x = 3.14159")
+    def test_float(self):
+        # Floating point numbers
+        self.check_tokenize("x = 3.14159", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     NUMBER     '3.14159'     (1, 4) (1, 11)
-    >>> dump_tokens("x = 314159.")
+    """)
+        self.check_tokenize("x = 314159.", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     NUMBER     '314159.'     (1, 4) (1, 11)
-    >>> dump_tokens("x = .314159")
+    """)
+        self.check_tokenize("x = .314159", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     NUMBER     '.314159'     (1, 4) (1, 11)
-    >>> dump_tokens("x = 3e14159")
+    """)
+        self.check_tokenize("x = 3e14159", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     NUMBER     '3e14159'     (1, 4) (1, 11)
-    >>> dump_tokens("x = 3E123")
+    """)
+        self.check_tokenize("x = 3E123", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     NUMBER     '3E123'       (1, 4) (1, 9)
-    >>> dump_tokens("x+y = 3e-1230")
+    """)
+        self.check_tokenize("x+y = 3e-1230", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '+'           (1, 1) (1, 2)
     NAME       'y'           (1, 2) (1, 3)
     OP         '='           (1, 4) (1, 5)
     NUMBER     '3e-1230'     (1, 6) (1, 13)
-    >>> dump_tokens("x = 3.14e159")
+    """)
+        self.check_tokenize("x = 3.14e159", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     NUMBER     '3.14e159'    (1, 4) (1, 12)
+    """)
 
-String literals
-
-    >>> dump_tokens("x = ''; y = \\\"\\\"")
+    def test_string(self):
+        # String literals
+        self.check_tokenize("x = ''; y = \"\"", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     STRING     "''"          (1, 4) (1, 6)
@@ -231,7 +194,8 @@
     NAME       'y'           (1, 8) (1, 9)
     OP         '='           (1, 10) (1, 11)
     STRING     '""'          (1, 12) (1, 14)
-    >>> dump_tokens("x = '\\\"'; y = \\\"'\\\"")
+    """)
+        self.check_tokenize("x = '\"'; y = \"'\"", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     STRING     '\\'"\\''       (1, 4) (1, 7)
@@ -239,25 +203,29 @@
     NAME       'y'           (1, 9) (1, 10)
     OP         '='           (1, 11) (1, 12)
     STRING     '"\\'"'        (1, 13) (1, 16)
-    >>> dump_tokens("x = \\\"doesn't \\\"shrink\\\", does it\\\"")
+    """)
+        self.check_tokenize("x = \"doesn't \"shrink\", does it\"", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     STRING     '"doesn\\'t "' (1, 4) (1, 14)
     NAME       'shrink'      (1, 14) (1, 20)
     STRING     '", does it"' (1, 20) (1, 31)
-    >>> dump_tokens("x = u'abc' + U'ABC'")
+    """)
+        self.check_tokenize("x = u'abc' + U'ABC'", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     STRING     "u'abc'"      (1, 4) (1, 10)
     OP         '+'           (1, 11) (1, 12)
     STRING     "U'ABC'"      (1, 13) (1, 19)
-    >>> dump_tokens('y = u"ABC" + U"ABC"')
+    """)
+        self.check_tokenize('y = u"ABC" + U"ABC"', """\
     NAME       'y'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     STRING     'u"ABC"'      (1, 4) (1, 10)
     OP         '+'           (1, 11) (1, 12)
     STRING     'U"ABC"'      (1, 13) (1, 19)
-    >>> dump_tokens("x = ur'abc' + Ur'ABC' + uR'ABC' + UR'ABC'")
+    """)
+        self.check_tokenize("x = ur'abc' + Ur'ABC' + uR'ABC' + UR'ABC'", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     STRING     "ur'abc'"     (1, 4) (1, 11)
@@ -267,7 +235,8 @@
     STRING     "uR'ABC'"     (1, 24) (1, 31)
     OP         '+'           (1, 32) (1, 33)
     STRING     "UR'ABC'"     (1, 34) (1, 41)
-    >>> dump_tokens('y = ur"abc" + Ur"ABC" + uR"ABC" + UR"ABC"')
+    """)
+        self.check_tokenize('y = ur"abc" + Ur"ABC" + uR"ABC" + UR"ABC"', """\
     NAME       'y'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     STRING     'ur"abc"'     (1, 4) (1, 11)
@@ -278,15 +247,18 @@
     OP         '+'           (1, 32) (1, 33)
     STRING     'UR"ABC"'     (1, 34) (1, 41)
 
-    >>> dump_tokens("b'abc' + B'abc'")
+    """)
+        self.check_tokenize("b'abc' + B'abc'", """\
     STRING     "b'abc'"      (1, 0) (1, 6)
     OP         '+'           (1, 7) (1, 8)
     STRING     "B'abc'"      (1, 9) (1, 15)
-    >>> dump_tokens('b"abc" + B"abc"')
+    """)
+        self.check_tokenize('b"abc" + B"abc"', """\
     STRING     'b"abc"'      (1, 0) (1, 6)
     OP         '+'           (1, 7) (1, 8)
     STRING     'B"abc"'      (1, 9) (1, 15)
-    >>> dump_tokens("br'abc' + bR'abc' + Br'abc' + BR'abc'")
+    """)
+        self.check_tokenize("br'abc' + bR'abc' + Br'abc' + BR'abc'", """\
     STRING     "br'abc'"     (1, 0) (1, 7)
     OP         '+'           (1, 8) (1, 9)
     STRING     "bR'abc'"     (1, 10) (1, 17)
@@ -294,7 +266,8 @@
     STRING     "Br'abc'"     (1, 20) (1, 27)
     OP         '+'           (1, 28) (1, 29)
     STRING     "BR'abc'"     (1, 30) (1, 37)
-    >>> dump_tokens('br"abc" + bR"abc" + Br"abc" + BR"abc"')
+    """)
+        self.check_tokenize('br"abc" + bR"abc" + Br"abc" + BR"abc"', """\
     STRING     'br"abc"'     (1, 0) (1, 7)
     OP         '+'           (1, 8) (1, 9)
     STRING     'bR"abc"'     (1, 10) (1, 17)
@@ -302,10 +275,10 @@
     STRING     'Br"abc"'     (1, 20) (1, 27)
     OP         '+'           (1, 28) (1, 29)
     STRING     'BR"abc"'     (1, 30) (1, 37)
+    """)
 
-Operators
-
-    >>> dump_tokens("def d22(a, b, c=2, d=2, *k): pass")
+    def test_function(self):
+        self.check_tokenize("def d22(a, b, c=2, d=2, *k): pass", """\
     NAME       'def'         (1, 0) (1, 3)
     NAME       'd22'         (1, 4) (1, 7)
     OP         '('           (1, 7) (1, 8)
@@ -326,7 +299,8 @@
     OP         ')'           (1, 26) (1, 27)
     OP         ':'           (1, 27) (1, 28)
     NAME       'pass'        (1, 29) (1, 33)
-    >>> dump_tokens("def d01v_(a=1, *k, **w): pass")
+    """)
+        self.check_tokenize("def d01v_(a=1, *k, **w): pass", """\
     NAME       'def'         (1, 0) (1, 3)
     NAME       'd01v_'       (1, 4) (1, 9)
     OP         '('           (1, 9) (1, 10)
@@ -342,11 +316,12 @@
     OP         ')'           (1, 22) (1, 23)
     OP         ':'           (1, 23) (1, 24)
     NAME       'pass'        (1, 25) (1, 29)
+    """)
 
-Comparison
-
-    >>> dump_tokens("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " +
-    ...             "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass")
+    def test_comparison(self):
+        # Comparison
+        self.check_tokenize("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " +
+                            "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass", """\
     NAME       'if'          (1, 0) (1, 2)
     NUMBER     '1'           (1, 3) (1, 4)
     OP         '<'           (1, 5) (1, 6)
@@ -379,10 +354,11 @@
     NUMBER     '1'           (1, 81) (1, 82)
     OP         ':'           (1, 82) (1, 83)
     NAME       'pass'        (1, 84) (1, 88)
+    """)
 
-Shift
-
-    >>> dump_tokens("x = 1 << 1 >> 5")
+    def test_shift(self):
+        # Shift
+        self.check_tokenize("x = 1 << 1 >> 5", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     NUMBER     '1'           (1, 4) (1, 5)
@@ -390,10 +366,11 @@
     NUMBER     '1'           (1, 9) (1, 10)
     OP         '>>'          (1, 11) (1, 13)
     NUMBER     '5'           (1, 14) (1, 15)
+    """)
 
-Additive
-
-    >>> dump_tokens("x = 1 - y + 15 - 01 + 0x124 + z + a[5]")
+    def test_additive(self):
+        # Additive
+        self.check_tokenize("x = 1 - y + 15 - 01 + 0x124 + z + a[5]", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     NUMBER     '1'           (1, 4) (1, 5)
@@ -412,10 +389,11 @@
     OP         '['           (1, 35) (1, 36)
     NUMBER     '5'           (1, 36) (1, 37)
     OP         ']'           (1, 37) (1, 38)
+    """)
 
-Multiplicative
-
-    >>> dump_tokens("x = 1//1*1/5*12%0x12")
+    def test_multiplicative(self):
+        # Multiplicative
+        self.check_tokenize("x = 1//1*1/5*12%0x12", """\
     NAME       'x'           (1, 0) (1, 1)
     OP         '='           (1, 2) (1, 3)
     NUMBER     '1'           (1, 4) (1, 5)
@@ -429,10 +407,11 @@
     NUMBER     '12'          (1, 13) (1, 15)
     OP         '%'           (1, 15) (1, 16)
     NUMBER     '0x12'        (1, 16) (1, 20)
+    """)
 
-Unary
-
-    >>> dump_tokens("~1 ^ 1 & 1 |1 ^ -1")
+    def test_unary(self):
+        # Unary
+        self.check_tokenize("~1 ^ 1 & 1 |1 ^ -1", """\
     OP         '~'           (1, 0) (1, 1)
     NUMBER     '1'           (1, 1) (1, 2)
     OP         '^'           (1, 3) (1, 4)
@@ -444,7 +423,8 @@
     OP         '^'           (1, 14) (1, 15)
     OP         '-'           (1, 16) (1, 17)
     NUMBER     '1'           (1, 17) (1, 18)
-    >>> dump_tokens("-1*1/1+1*1//1 - ---1**1")
+    """)
+        self.check_tokenize("-1*1/1+1*1//1 - ---1**1", """\
     OP         '-'           (1, 0) (1, 1)
     NUMBER     '1'           (1, 1) (1, 2)
     OP         '*'           (1, 2) (1, 3)
@@ -464,10 +444,12 @@
     NUMBER     '1'           (1, 19) (1, 20)
     OP         '**'          (1, 20) (1, 22)
     NUMBER     '1'           (1, 22) (1, 23)
+    """)
 
-Selector
-
-    >>> dump_tokens("import sys, time\\nx = sys.modules['time'].time()")
+    def test_selector(self):
+        # Selector
+        self.check_tokenize("import sys, time\n"
+                            "x = sys.modules['time'].time()", """\
     NAME       'import'      (1, 0) (1, 6)
     NAME       'sys'         (1, 7) (1, 10)
     OP         ','           (1, 10) (1, 11)
@@ -485,10 +467,12 @@
     NAME       'time'        (2, 24) (2, 28)
     OP         '('           (2, 28) (2, 29)
     OP         ')'           (2, 29) (2, 30)
+    """)
 
-Methods
-
-    >>> dump_tokens("@staticmethod\\ndef foo(x,y): pass")
+    def test_method(self):
+        # Methods
+        self.check_tokenize("@staticmethod\n"
+                            "def foo(x,y): pass", """\
     OP         '@'           (1, 0) (1, 1)
     NAME       'staticmethod (1, 1) (1, 13)
     NEWLINE    '\\n'          (1, 13) (1, 14)
@@ -501,41 +485,13 @@
     OP         ')'           (2, 11) (2, 12)
     OP         ':'           (2, 12) (2, 13)
     NAME       'pass'        (2, 14) (2, 18)
+    """)
 
-Backslash means line continuation, except for comments
-
-    >>> roundtrip("x=1+\\\\n"
-    ...           "1\\n"
-    ...           "# This is a comment\\\\n"
-    ...           "# This also\\n")
-    True
-    >>> roundtrip("# Comment \\\\nx = 0")
-    True
-
-Two string literals on the same line
-
-    >>> roundtrip("'' ''")
-    True
-
-Test roundtrip on random python modules.
-pass the '-ucpu' option to process the full directory.
-
-    >>>
-    >>> tempdir = os.path.dirname(f) or os.curdir
-    >>> testfiles = glob.glob(os.path.join(tempdir, "test*.py"))
-
-    >>> if not test_support.is_resource_enabled("cpu"):
-    ...     testfiles = random.sample(testfiles, 10)
-    ...
-    >>> for testfile in testfiles:
-    ...     if not roundtrip(open(testfile)):
-    ...         print "Roundtrip failed for file %s" % testfile
-    ...         break
-    ... else: True
-    True
-
-Evil tabs
-    >>> dump_tokens("def f():\\n\\tif x\\n        \\tpass")
+    def test_tabs(self):
+        # Evil tabs
+        self.check_tokenize("def f():\n"
+                            "\tif x\n"
+                            "        \tpass", """\
     NAME       'def'         (1, 0) (1, 3)
     NAME       'f'           (1, 4) (1, 5)
     OP         '('           (1, 5) (1, 6)
@@ -550,56 +506,16 @@
     NAME       'pass'        (3, 9) (3, 13)
     DEDENT     ''            (4, 0) (4, 0)
     DEDENT     ''            (4, 0) (4, 0)
+    """)
 
-Pathological whitespace (http://bugs.python.org/issue16152)
-    >>> dump_tokens("@          ")
+    def test_pathological_trailing_whitespace(self):
+        # Pathological whitespace (http://bugs.python.org/issue16152)
+        self.check_tokenize("@          ", """\
     OP         '@'           (1, 0) (1, 1)
-"""
+    """)
 
 
-from test import test_support
-from tokenize import (untokenize, generate_tokens, NUMBER, NAME, OP,
-                     STRING, ENDMARKER, tok_name, Untokenizer)
-from StringIO import StringIO
-import os
-from unittest import TestCase
-
-def dump_tokens(s):
-    """Print out the tokens in s in a table format.
-
-    The ENDMARKER is omitted.
-    """
-    f = StringIO(s)
-    for type, token, start, end, line in generate_tokens(f.readline):
-        if type == ENDMARKER:
-            break
-        type = tok_name[type]
-        print("%(type)-10.10s %(token)-13.13r %(start)s %(end)s" % locals())
-
-# This is an example from the docs, set up as a doctest.
 def decistmt(s):
-    """Substitute Decimals for floats in a string of statements.
-
-    >>> from decimal import Decimal
-    >>> s = 'print +21.3e-5*-.1234/81.7'
-    >>> decistmt(s)
-    "print +Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')"
-
-    The format of the exponent is inherited from the platform C library.
-    Known cases are "e-007" (Windows) and "e-07" (not Windows).  Since
-    we're only showing 12 digits, and the 13th isn't close to 5, the
-    rest of the output should be platform-independent.
-
-    >>> exec(s) #doctest: +ELLIPSIS
-    -3.21716034272e-0...7
-
-    Output from calculations with Decimal should be identical across all
-    platforms.
-
-    >>> exec(decistmt(s))
-    -3.217160342717258261933904529E-7
-    """
-
     result = []
     g = generate_tokens(StringIO(s).readline)   # tokenize the string
     for toknum, tokval, _, _, _  in g:
@@ -614,6 +530,27 @@
             result.append((toknum, tokval))
     return untokenize(result)
 
+class TestMisc(TestCase):
+
+    def test_decistmt(self):
+        # Substitute Decimals for floats in a string of statements.
+        # This is an example from the docs.
+
+        from decimal import Decimal
+        s = '+21.3e-5*-.1234/81.7'
+        self.assertEqual(decistmt(s),
+                         "+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')")
+
+        # The format of the exponent is inherited from the platform C library.
+        # Known cases are "e-007" (Windows) and "e-07" (not Windows).  Since
+        # we're only showing 12 digits, and the 13th isn't close to 5, the
+        # rest of the output should be platform-independent.
+        self.assertRegexpMatches(str(eval(s)), '-3.21716034272e-0+7')
+
+        # Output from calculations with Decimal should be identical across all
+        # platforms.
+        self.assertEqual(eval(decistmt(s)), Decimal('-3.217160342717258261933904529E-7'))
+
 
 class UntokenizeTest(TestCase):
 
@@ -650,12 +587,137 @@
         self.assertEqual(u.untokenize(iter([token])), 'Hello ')
 
 
-__test__ = {"doctests" : doctests, 'decistmt': decistmt}
+class TestRoundtrip(TestCase):
+
+    def check_roundtrip(self, f):
+        """
+        Test roundtrip for `untokenize`. `f` is an open file or a string.
+        The source code in f is tokenized, converted back to source code
+        via tokenize.untokenize(), and tokenized again from the latter.
+        The test fails if the second tokenization doesn't match the first.
+        """
+        if isinstance(f, str): f = StringIO(f)
+        token_list = list(generate_tokens(f.readline))
+        f.close()
+        tokens1 = [tok[:2] for tok in token_list]
+        new_text = untokenize(tokens1)
+        readline = iter(new_text.splitlines(1)).next
+        tokens2 = [tok[:2] for tok in generate_tokens(readline)]
+        self.assertEqual(tokens2, tokens1)
+
+    def test_roundtrip(self):
+        # There are some standard formatting practices that are easy to get right.
+
+        self.check_roundtrip("if x == 1:\n"
+                             "    print(x)\n")
+
+        # There are some standard formatting practices that are easy to get right.
+
+        self.check_roundtrip("if x == 1:\n"
+                             "    print x\n")
+        self.check_roundtrip("# This is a comment\n"
+                             "# This also")
+
+        # Some people use different formatting conventions, which makes
+        # untokenize a little trickier. Note that this test involves trailing
+        # whitespace after the colon. Note that we use hex escapes to make the
+        # two trailing blanks apparent in the expected output.
+
+        self.check_roundtrip("if x == 1 : \n"
+                             "  print x\n")
+        fn = test_support.findfile("tokenize_tests" + os.extsep + "txt")
+        with open(fn) as f:
+            self.check_roundtrip(f)
+        self.check_roundtrip("if x == 1:\n"
+                             "    # A comment by itself.\n"
+                             "    print x # Comment here, too.\n"
+                             "    # Another comment.\n"
+                             "after_if = True\n")
+        self.check_roundtrip("if (x # The comments need to go in the right place\n"
+                             "    == 1):\n"
+                             "    print 'x==1'\n")
+        self.check_roundtrip("class Test: # A comment here\n"
+                             "  # A comment with weird indent\n"
+                             "  after_com = 5\n"
+                             "  def x(m): return m*5 # a one liner\n"
+                             "  def y(m): # A whitespace after the colon\n"
+                             "     return y*4 # 3-space indent\n")
+
+        # Some error-handling code
+
+        self.check_roundtrip("try: import somemodule\n"
+                             "except ImportError: # comment\n"
+                             "    print 'Can not import' # comment2\n"
+                             "else:   print 'Loaded'\n")
+
+    def test_continuation(self):
+        # Balancing continuation
+        self.check_roundtrip("a = (3,4, \n"
+                             "5,6)\n"
+                             "y = [3, 4,\n"
+                             "5]\n"
+                             "z = {'a': 5,\n"
+                             "'b':15, 'c':True}\n"
+                             "x = len(y) + 5 - a[\n"
+                             "3] - a[2]\n"
+                             "+ len(z) - z[\n"
+                             "'b']\n")
+
+    def test_backslash_continuation(self):
+        # Backslash means line continuation, except for comments
+        self.check_roundtrip("x=1+\\\n"
+                             "1\n"
+                             "# This is a comment\\\n"
+                             "# This also\n")
+        self.check_roundtrip("# Comment \\\n"
+                             "x = 0")
+
+    def test_string_concatenation(self):
+        # Two string literals on the same line
+        self.check_roundtrip("'' ''")
+
+    def test_random_files(self):
+        # Test roundtrip on random python modules.
+        # pass the '-ucpu' option to process the full directory.
+
+        import glob, random
+        fn = test_support.findfile("tokenize_tests" + os.extsep + "txt")
+        tempdir = os.path.dirname(fn) or os.curdir
+        testfiles = glob.glob(os.path.join(tempdir, "test*.py"))
+
+        if not test_support.is_resource_enabled("cpu"):
+            testfiles = random.sample(testfiles, 10)
+
+        for testfile in testfiles:
+            try:
+                with open(testfile, 'rb') as f:
+                    self.check_roundtrip(f)
+            except:
+                print "Roundtrip failed for file %s" % testfile
+                raise
+
+
+    def roundtrip(self, code):
+        if isinstance(code, str):
+            code = code.encode('utf-8')
+        tokens = generate_tokens(StringIO(code).readline)
+        return untokenize(tokens).decode('utf-8')
+
+    def test_indentation_semantics_retained(self):
+        """
+        Ensure that although whitespace might be mutated in a roundtrip,
+        the semantic meaning of the indentation remains consistent.
+        """
+        code = "if False:\n\tx=3\n\tx=3\n"
+        codelines = self.roundtrip(code).split('\n')
+        self.assertEqual(codelines[1], codelines[2])
+
 
 def test_main():
-    from test import test_tokenize
-    test_support.run_doctest(test_tokenize, True)
+    test_support.run_unittest(TokenizeTest)
     test_support.run_unittest(UntokenizeTest)
+    test_support.run_unittest(TestRoundtrip)
+    test_support.run_unittest(TestMisc)
 
 if __name__ == "__main__":
     test_main()
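The table the rewritten check_tokenize() compares against comes straight from tokenize.generate_tokens(); a standalone dump in the same layout (helper name invented):

from StringIO import StringIO
from tokenize import generate_tokens, tok_name, ENDMARKER

def dump(source):
    # One row per token, skipping the trailing ENDMARKER, in the same
    # "TYPE  'token'  (row, col) (row, col)" layout as the expected blocks.
    for tok_type, token, start, end, line in generate_tokens(StringIO(source).readline):
        if tok_type == ENDMARKER:
            break
        print "%-10.10s %-13.13r %s %s" % (tok_name[tok_type], token, start, end)

dump("1 + 1")
# NUMBER     '1'           (1, 0) (1, 1)
# OP         '+'           (1, 2) (1, 3)
# NUMBER     '1'           (1, 4) (1, 5)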
diff --git a/lib/python2.7/test/test_traceback.py b/lib/python2.7/test/test_traceback.py
index 8b0322b..03c4415 100644
--- a/lib/python2.7/test/test_traceback.py
+++ b/lib/python2.7/test/test_traceback.py
@@ -4,7 +4,8 @@
 import sys
 import unittest
 from imp import reload
-from test.test_support import run_unittest, is_jython, Error, cpython_only
+from test.test_support import (run_unittest, is_jython, Error, cpython_only,
+                               captured_output)
 
 import traceback
 
@@ -206,9 +207,53 @@
         self.assertTrue(location.startswith('  File'))
         self.assertTrue(source_line.startswith('    raise'))
 
+    def test_print_stack(self):
+        def prn():
+            traceback.print_stack()
+        with captured_output("stderr") as stderr:
+            prn()
+        lineno = prn.__code__.co_firstlineno
+        file = prn.__code__.co_filename
+        self.assertEqual(stderr.getvalue().splitlines()[-4:], [
+            '  File "%s", line %d, in test_print_stack' % (file, lineno+3),
+            '    prn()',
+            '  File "%s", line %d, in prn' % (file, lineno+1),
+            '    traceback.print_stack()',
+        ])
+
+    def test_format_stack(self):
+        def fmt():
+            return traceback.format_stack()
+        result = fmt()
+        lineno = fmt.__code__.co_firstlineno
+        file = fmt.__code__.co_filename
+        self.assertEqual(result[-2:], [
+            '  File "%s", line %d, in test_format_stack\n'
+            '    result = fmt()\n' % (file, lineno+2),
+            '  File "%s", line %d, in fmt\n'
+            '    return traceback.format_stack()\n' % (file, lineno+1),
+        ])
+
+
+class MiscTracebackCases(unittest.TestCase):
+    #
+    # Check non-printing functions in traceback module
+    #
+
+    def test_extract_stack(self):
+        def extract():
+            return traceback.extract_stack()
+        result = extract()
+        lineno = extract.__code__.co_firstlineno
+        file = extract.__code__.co_filename
+        self.assertEqual(result[-2:], [
+            (file, lineno+2, 'test_extract_stack', 'result = extract()'),
+            (file, lineno+1, 'extract', 'return traceback.extract_stack()'),
+        ])
+
 
 def test_main():
-    run_unittest(TracebackCases, TracebackFormatTests)
+    run_unittest(TracebackCases, TracebackFormatTests, MiscTracebackCases)
 
 
 if __name__ == "__main__":
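The non-printing helpers MiscTracebackCases covers are easy to poke at directly; a small illustration:

import traceback

def where_am_i():
    # extract_stack() yields (filename, lineno, funcname, source_line)
    # tuples for every frame, innermost last.
    return traceback.extract_stack()

frames = where_am_i()
filename, lineno, funcname, source_line = frames[-1]
print funcname      # where_am_i
print source_line   # return traceback.extract_stack()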
diff --git a/lib/python2.7/test/test_unicode.py b/lib/python2.7/test/test_unicode.py
index 625d08c..63fb831 100644
--- a/lib/python2.7/test/test_unicode.py
+++ b/lib/python2.7/test/test_unicode.py
@@ -33,6 +33,9 @@
         return None
 codecs.register(search_function)
 
+class UnicodeSubclass(unicode):
+    pass
+
 class UnicodeTest(
     string_tests.CommonTest,
     string_tests.MixinStrUnicodeUserStringTest,
@@ -685,9 +688,6 @@
             u'unicode remains unicode'
         )
 
-        class UnicodeSubclass(unicode):
-            pass
-
         self.assertEqual(
             unicode(UnicodeSubclass('unicode subclass becomes unicode')),
             u'unicode subclass becomes unicode'
@@ -1036,10 +1036,13 @@
         self.assertRaises(UnicodeError, unicode, 'Andr\202 x', 'ascii','strict')
         self.assertEqual(unicode('Andr\202 x','ascii','ignore'), u"Andr x")
         self.assertEqual(unicode('Andr\202 x','ascii','replace'), u'Andr\uFFFD x')
-        self.assertEqual(u'abcde'.decode('ascii', 'ignore'),
-                         u'abcde'.decode('ascii', errors='ignore'))
-        self.assertEqual(u'abcde'.decode('ascii', 'replace'),
-                         u'abcde'.decode(encoding='ascii', errors='replace'))
+        self.assertEqual(unicode('\202 x', 'ascii', 'replace'), u'\uFFFD x')
+        with test_support.check_py3k_warnings():
+            self.assertEqual(u'abcde'.decode('ascii', 'ignore'),
+                             u'abcde'.decode('ascii', errors='ignore'))
+        with test_support.check_py3k_warnings():
+            self.assertEqual(u'abcde'.decode('ascii', 'replace'),
+                             u'abcde'.decode(encoding='ascii', errors='replace'))
 
         # Error handling (unknown character names)
         self.assertEqual("\\N{foo}xx".decode("unicode-escape", "ignore"), u"xx")
@@ -1268,6 +1271,9 @@
         self.assertEqual(unicode(Foo6("bar")), u"foou")
         self.assertEqual(unicode(Foo7("bar")), u"foou")
         self.assertEqual(unicode(Foo8("foo")), u"foofoo")
+        self.assertIs(type(unicode(Foo8("foo"))), Foo8)
+        self.assertEqual(UnicodeSubclass(Foo8("foo")), u"foofoo")
+        self.assertIs(type(UnicodeSubclass(Foo8("foo"))), UnicodeSubclass)
         self.assertEqual(str(Foo9("foo")), "string")
         self.assertEqual(unicode(Foo9("foo")), u"not unicode")
 
diff --git a/lib/python2.7/test/test_unicode_file.py b/lib/python2.7/test/test_unicode_file.py
index f04bad3..ae2d9d5 100644
--- a/lib/python2.7/test/test_unicode_file.py
+++ b/lib/python2.7/test/test_unicode_file.py
@@ -5,7 +5,7 @@
 import unicodedata
 
 import unittest
-from test.test_support import run_unittest, TESTFN_UNICODE
+from test.test_support import run_unittest, change_cwd, TESTFN_UNICODE
 from test.test_support import TESTFN_ENCODING, TESTFN_UNENCODABLE
 try:
     TESTFN_ENCODED = TESTFN_UNICODE.encode(TESTFN_ENCODING)
@@ -114,13 +114,11 @@
         os.unlink(filename1 + ".new")
 
     def _do_directory(self, make_name, chdir_name, encoded):
-        cwd = os.getcwd()
         if os.path.isdir(make_name):
             os.rmdir(make_name)
         os.mkdir(make_name)
         try:
-            os.chdir(chdir_name)
-            try:
+            with change_cwd(chdir_name):
                 if not encoded:
                     cwd_result = os.getcwdu()
                     name_result = make_name
@@ -132,8 +130,6 @@
                 name_result = unicodedata.normalize("NFD", name_result)
 
                 self.assertEqual(os.path.basename(cwd_result),name_result)
-            finally:
-                os.chdir(cwd)
         finally:
             os.rmdir(make_name)
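change_cwd() replaces the manual save/chdir/restore dance; it is roughly a context manager along these lines (a sketch, not the test_support source):

import contextlib
import os

@contextlib.contextmanager
def change_cwd(path):
    saved = os.getcwd()
    os.chdir(path)
    try:
        yield path
    finally:
        # Always return to the original directory, even if the body raises.
        os.chdir(saved)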
 
diff --git a/lib/python2.7/test/test_urllib.py b/lib/python2.7/test/test_urllib.py
index adffb57..e14cccc 100644
--- a/lib/python2.7/test/test_urllib.py
+++ b/lib/python2.7/test/test_urllib.py
@@ -209,10 +209,26 @@
 Content-Type: text/html; charset=iso-8859-1
 """)
         try:
-            self.assertRaises(IOError, urllib.urlopen, "http://python.org/")
+            msg = "Redirection to url 'file:"
+            with self.assertRaisesRegexp(IOError, msg):
+                urllib.urlopen("http://python.org/")
         finally:
             self.unfakehttp()
 
+    def test_redirect_limit_independent(self):
+        # Ticket #12923: make sure independent requests each use their
+        # own retry limit.
+        for i in range(urllib.FancyURLopener().maxtries):
+            self.fakehttp(b'''HTTP/1.1 302 Found
+Location: file://guidocomputer.athome.com:/python/license
+Connection: close
+''')
+            try:
+                self.assertRaises(IOError, urllib.urlopen,
+                    "http://something")
+            finally:
+                self.unfakehttp()
+
     def test_empty_socket(self):
         # urlopen() raises IOError if the underlying socket does not send any
         # data. (#1680230)
diff --git a/lib/python2.7/test/test_urllib2.py b/lib/python2.7/test/test_urllib2.py
index 32ffd0a..a6889cc 100644
--- a/lib/python2.7/test/test_urllib2.py
+++ b/lib/python2.7/test/test_urllib2.py
@@ -6,7 +6,7 @@
 import StringIO
 
 import urllib2
-from urllib2 import Request, OpenerDirector
+from urllib2 import Request, OpenerDirector, AbstractDigestAuthHandler
 
 try:
     import ssl
@@ -1290,6 +1290,16 @@
         else:
             self.assertTrue(False)
 
+    def test_unsupported_algorithm(self):
+        handler = AbstractDigestAuthHandler()
+        with self.assertRaises(ValueError) as exc:
+            handler.get_algorithm_impls('invalid')
+        self.assertEqual(
+            str(exc.exception),
+            "Unsupported digest authentication algorithm 'invalid'"
+        )
+
+
 class RequestTests(unittest.TestCase):
 
     def setUp(self):
@@ -1350,6 +1360,11 @@
         req = Request(url)
         self.assertEqual(req.get_full_url(), url)
 
+    def test_private_attributes(self):
+        self.assertFalse(hasattr(self.get, '_Request__r_xxx'))
+        # Issue #6500: infinite recursion
+        self.assertFalse(hasattr(self.get, '_Request__r_method'))
+
     def test_HTTPError_interface(self):
         """
         Issue 13211 reveals that HTTPError didn't implement the URLError
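The new digest test expects an unknown algorithm name to be rejected up front; exercised directly against a urllib2 that matches the test, that looks like:

from urllib2 import AbstractDigestAuthHandler

handler = AbstractDigestAuthHandler()
try:
    handler.get_algorithm_impls('invalid')
except ValueError as exc:
    # e.g. "Unsupported digest authentication algorithm 'invalid'"
    print exc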
diff --git a/lib/python2.7/test/test_userdict.py b/lib/python2.7/test/test_userdict.py
index d5cecd8..99526a2 100644
--- a/lib/python2.7/test/test_userdict.py
+++ b/lib/python2.7/test/test_userdict.py
@@ -2,6 +2,7 @@
 
 from test import test_support, mapping_tests
 import UserDict
+import warnings
 
 d0 = {}
 d1 = {"one": 1}
@@ -29,7 +30,9 @@
         self.assertEqual(UserDict.UserDict(one=1, two=2), d2)
         # item sequence constructor
         self.assertEqual(UserDict.UserDict([('one',1), ('two',2)]), d2)
-        self.assertEqual(UserDict.UserDict(dict=[('one',1), ('two',2)]), d2)
+        with test_support.check_warnings((".*'dict'.*",
+                                          PendingDeprecationWarning)):
+            self.assertEqual(UserDict.UserDict(dict=[('one',1), ('two',2)]), d2)
         # both together
         self.assertEqual(UserDict.UserDict([('one',1), ('two',2)], two=3, three=5), d3)
 
@@ -148,6 +151,36 @@
         self.assertEqual(t.popitem(), ("x", 42))
         self.assertRaises(KeyError, t.popitem)
 
+    def test_init(self):
+        for kw in 'self', 'other', 'iterable':
+            self.assertEqual(list(UserDict.UserDict(**{kw: 42}).items()),
+                             [(kw, 42)])
+        self.assertEqual(list(UserDict.UserDict({}, dict=42).items()),
+                         [('dict', 42)])
+        self.assertEqual(list(UserDict.UserDict({}, dict=None).items()),
+                         [('dict', None)])
+        with test_support.check_warnings((".*'dict'.*",
+                                          PendingDeprecationWarning)):
+            self.assertEqual(list(UserDict.UserDict(dict={'a': 42}).items()),
+                             [('a', 42)])
+        self.assertRaises(TypeError, UserDict.UserDict, 42)
+        self.assertRaises(TypeError, UserDict.UserDict, (), ())
+        self.assertRaises(TypeError, UserDict.UserDict.__init__)
+
+    def test_update(self):
+        for kw in 'self', 'other', 'iterable':
+            d = UserDict.UserDict()
+            d.update(**{kw: 42})
+            self.assertEqual(list(d.items()), [(kw, 42)])
+        d = UserDict.UserDict()
+        with test_support.check_warnings((".*'dict'.*",
+                                          PendingDeprecationWarning)):
+            d.update(dict={'a': 42})
+        self.assertEqual(list(d.items()), [('a', 42)])
+        self.assertRaises(TypeError, UserDict.UserDict().update, 42)
+        self.assertRaises(TypeError, UserDict.UserDict().update, {}, {})
+        self.assertRaises(TypeError, UserDict.UserDict.update)
+
     def test_missing(self):
         # Make sure UserDict doesn't have a __missing__ method
         self.assertEqual(hasattr(UserDict, "__missing__"), False)
@@ -155,7 +188,7 @@
         # (D) subclass defines __missing__ method returning a value
         # (E) subclass defines __missing__ method raising RuntimeError
         # (F) subclass sets __missing__ instance variable (no effect)
-        # (G) subclass doesn't define __missing__ at a all
+        # (G) subclass doesn't define __missing__ at all
         class D(UserDict.UserDict):
             def __missing__(self, key):
                 return 42
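The check_warnings() wrapper asserts that passing the mapping through the reserved keyword 'dict' now warns while still populating the object; the same check with the plain warnings module, assuming a UserDict that matches these tests:

import warnings
import UserDict

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    d = UserDict.UserDict(dict={'a': 42})

print d.items()   # [('a', 42)]
# True once the 'dict' keyword is deprecated as in the tests above.
print any(issubclass(w.category, PendingDeprecationWarning) for w in caught)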
diff --git a/lib/python2.7/test/test_warnings.py b/lib/python2.7/test/test_warnings.py
index 7a9459a..11dc294 100644
--- a/lib/python2.7/test/test_warnings.py
+++ b/lib/python2.7/test/test_warnings.py
@@ -593,6 +593,63 @@
         self.assertEqual(expect, self.module.formatwarning(message,
                                     category, file_name, line_num, file_line))
 
+    @test_support.requires_unicode
+    def test_formatwarning_unicode_msg(self):
+        message = u"msg"
+        category = Warning
+        file_name = os.path.splitext(warning_tests.__file__)[0] + '.py'
+        line_num = 3
+        file_line = linecache.getline(file_name, line_num).strip()
+        format = "%s:%s: %s: %s\n  %s\n"
+        expect = format % (file_name, line_num, category.__name__, message,
+                            file_line)
+        self.assertEqual(expect, self.module.formatwarning(message,
+                                                category, file_name, line_num))
+        # Test the 'line' argument.
+        file_line += " for the win!"
+        expect = format % (file_name, line_num, category.__name__, message,
+                            file_line)
+        self.assertEqual(expect, self.module.formatwarning(message,
+                                    category, file_name, line_num, file_line))
+
+    @test_support.requires_unicode
+    @unittest.skipUnless(test_support.FS_NONASCII, 'need test_support.FS_NONASCII')
+    def test_formatwarning_unicode_msg_nonascii_filename(self):
+        message = u"msg"
+        category = Warning
+        unicode_file_name = test_support.FS_NONASCII + u'.py'
+        file_name = unicode_file_name.encode(sys.getfilesystemencoding())
+        line_num = 3
+        file_line = 'spam'
+        format = "%s:%s: %s: %s\n  %s\n"
+        expect = format % (file_name, line_num, category.__name__, str(message),
+                            file_line)
+        self.assertEqual(expect, self.module.formatwarning(message,
+                                    category, file_name, line_num, file_line))
+        message = u"\xb5sg"
+        expect = format % (unicode_file_name, line_num, category.__name__, message,
+                            file_line)
+        self.assertEqual(expect, self.module.formatwarning(message,
+                                    category, file_name, line_num, file_line))
+
+    @test_support.requires_unicode
+    def test_formatwarning_unicode_msg_nonascii_fileline(self):
+        message = u"msg"
+        category = Warning
+        file_name = 'file.py'
+        line_num = 3
+        file_line = 'sp\xe4m'
+        format = "%s:%s: %s: %s\n  %s\n"
+        expect = format % (file_name, line_num, category.__name__, str(message),
+                            file_line)
+        self.assertEqual(expect, self.module.formatwarning(message,
+                                    category, file_name, line_num, file_line))
+        message = u"\xb5sg"
+        expect = format % (file_name, line_num, category.__name__, message,
+                            unicode(file_line, 'latin1'))
+        self.assertEqual(expect, self.module.formatwarning(message,
+                                    category, file_name, line_num, file_line))
+
     def test_showwarning(self):
         file_name = os.path.splitext(warning_tests.__file__)[0] + '.py'
         line_num = 3
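All three new cases exercise the five-field layout that warnings.formatwarning() produces; for instance:

import warnings

report = warnings.formatwarning(u"msg", Warning, "file.py", 3, "x = spam()")
print report,
# file.py:3: Warning: msg
#   x = spam()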
diff --git a/lib/python2.7/test/test_weakref.py b/lib/python2.7/test/test_weakref.py
index cc0a755..b7f985c 100644
--- a/lib/python2.7/test/test_weakref.py
+++ b/lib/python2.7/test/test_weakref.py
@@ -1197,6 +1197,18 @@
             dict[o] = o.arg
         return dict, objects
 
+    def test_make_weak_valued_dict_misc(self):
+        # errors
+        self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__)
+        self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {})
+        self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ())
+        # special keyword arguments
+        o = Object(3)
+        for kw in 'self', 'other', 'iterable':
+            d = weakref.WeakValueDictionary(**{kw: o})
+            self.assertEqual(list(d.keys()), [kw])
+            self.assertEqual(d[kw], o)
+
     def make_weak_valued_dict(self):
         dict = weakref.WeakValueDictionary()
         objects = map(Object, range(self.COUNT))
@@ -1279,6 +1291,19 @@
     def test_weak_valued_dict_update(self):
         self.check_update(weakref.WeakValueDictionary,
                           {1: C(), 'a': C(), C(): C()})
+        # errors
+        self.assertRaises(TypeError, weakref.WeakValueDictionary.update)
+        d = weakref.WeakValueDictionary()
+        self.assertRaises(TypeError, d.update, {}, {})
+        self.assertRaises(TypeError, d.update, (), ())
+        self.assertEqual(list(d.keys()), [])
+        # special keyword arguments
+        o = Object(3)
+        for kw in 'self', 'dict', 'other', 'iterable':
+            d = weakref.WeakValueDictionary()
+            d.update(**{kw: o})
+            self.assertEqual(list(d.keys()), [kw])
+            self.assertEqual(d[kw], o)
 
     def test_weak_keyed_dict_update(self):
         self.check_update(weakref.WeakKeyDictionary,
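WeakValueDictionary entries disappear as soon as the last strong reference to a value goes away, which is why the new checks build their own throwaway objects; for example:

import weakref

class Obj(object):
    pass

o = Obj()
d = weakref.WeakValueDictionary()
d['key'] = o
print d.keys()   # ['key'] while a strong reference to o exists
del o
print d.keys()   # [] once the value has been garbage collected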
diff --git a/lib/python2.7/test/test_wsgiref.py b/lib/python2.7/test/test_wsgiref.py
index 40fc35e..2469f67 100644
--- a/lib/python2.7/test/test_wsgiref.py
+++ b/lib/python2.7/test/test_wsgiref.py
@@ -1,14 +1,14 @@
-from __future__ import nested_scopes    # Backward compat for 2.1
 from unittest import TestCase
 from wsgiref.util import setup_testing_defaults
 from wsgiref.headers import Headers
 from wsgiref.handlers import BaseHandler, BaseCGIHandler
 from wsgiref import util
 from wsgiref.validate import validator
-from wsgiref.simple_server import WSGIServer, WSGIRequestHandler, demo_app
+from wsgiref.simple_server import WSGIServer, WSGIRequestHandler
 from wsgiref.simple_server import make_server
 from StringIO import StringIO
 from SocketServer import BaseServer
+
 import os
 import re
 import sys
diff --git a/lib/python2.7/test/test_xml_etree_c.py b/lib/python2.7/test/test_xml_etree_c.py
index 474a4b4..98410c5 100644
--- a/lib/python2.7/test/test_xml_etree_c.py
+++ b/lib/python2.7/test/test_xml_etree_c.py
@@ -30,6 +30,38 @@
         finally:
             data = None
 
+    def test_del_attribute(self):
+        element = cET.Element('tag')
+
+        element.tag = 'TAG'
+        with self.assertRaises(AttributeError):
+            del element.tag
+        self.assertEqual(element.tag, 'TAG')
+
+        with self.assertRaises(AttributeError):
+            del element.text
+        self.assertIsNone(element.text)
+        element.text = 'TEXT'
+        with self.assertRaises(AttributeError):
+            del element.text
+        self.assertEqual(element.text, 'TEXT')
+
+        with self.assertRaises(AttributeError):
+            del element.tail
+        self.assertIsNone(element.tail)
+        element.tail = 'TAIL'
+        with self.assertRaises(AttributeError):
+            del element.tail
+        self.assertEqual(element.tail, 'TAIL')
+
+        with self.assertRaises(AttributeError):
+            del element.attrib
+        self.assertEqual(element.attrib, {})
+        element.attrib = {'A': 'B', 'C': 'D'}
+        with self.assertRaises(AttributeError):
+            del element.attrib
+        self.assertEqual(element.attrib, {'A': 'B', 'C': 'D'})
+
 
 def test_main():
     from test import test_xml_etree, test_xml_etree_c
diff --git a/lib/python2.7/test/test_xmlrpc.py b/lib/python2.7/test/test_xmlrpc.py
index 2bb3978..ca8d5d8 100644
--- a/lib/python2.7/test/test_xmlrpc.py
+++ b/lib/python2.7/test/test_xmlrpc.py
@@ -23,13 +23,6 @@
 except ImportError:
     gzip = None
 
-try:
-    unicode
-except NameError:
-    have_unicode = False
-else:
-    have_unicode = True
-
 alist = [{'astring': 'foo@bar.baz.spam',
           'afloat': 7283.43,
           'anint': 2**20,
@@ -37,8 +30,6 @@
           'anotherlist': ['.zyx.41'],
           'abase64': xmlrpclib.Binary("my dog has fleas"),
           'boolean': xmlrpclib.False,
-          'unicode': u'\u4000\u6000\u8000',
-          u'ukey\u4000': 'regular value',
           'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
           'datetime2': xmlrpclib.DateTime(
                         (2005, 02, 10, 11, 41, 23, 0, 1, -1)),
@@ -46,6 +37,12 @@
                         datetime.datetime(2005, 02, 10, 11, 41, 23)),
           }]
 
+if test_support.have_unicode:
+    alist[0].update({
+          'unicode': test_support.u(r'\u4000\u6000\u8000'),
+          test_support.u(r'ukey\u4000'): 'regular value',
+    })
+
 class XMLRPCTestCase(unittest.TestCase):
 
     def test_dump_load(self):
@@ -150,6 +147,25 @@
                          xmlrpclib.loads(strg)[0][0])
         self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))
 
+    @test_support.requires_unicode
+    def test_dump_encoding(self):
+        value = {test_support.u(r'key\u20ac\xa4'):
+                 test_support.u(r'value\u20ac\xa4')}
+        strg = xmlrpclib.dumps((value,), encoding='iso-8859-15')
+        strg = "<?xml version='1.0' encoding='iso-8859-15'?>" + strg
+        self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
+
+        strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
+                               methodresponse=True)
+        self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
+
+        methodname = test_support.u(r'method\u20ac\xa4')
+        strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
+                               methodname=methodname)
+        self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
+        self.assertEqual(xmlrpclib.loads(strg)[1], methodname)
+
+    @test_support.requires_unicode
     def test_default_encoding_issues(self):
         # SF bug #1115989: wrong decoding in '_stringify'
         utf8 = """<?xml version='1.0' encoding='iso-8859-1'?>
@@ -182,7 +198,7 @@
                 temp_sys.setdefaultencoding(old_encoding)
 
         items = d.items()
-        if have_unicode:
+        if test_support.have_unicode:
             self.assertEqual(s, u"abc \x95")
             self.assertIsInstance(s, unicode)
             self.assertEqual(items, [(u"def \x96", u"ghi \x97")])
@@ -282,7 +298,7 @@
 # The evt is set twice.  First when the server is ready to serve.
 # Second when the server has been shutdown.  The user must clear
 # the event after it has been set the first time to catch the second set.
-def http_server(evt, numrequests, requestHandler=None):
+def http_server(evt, numrequests, requestHandler=None, encoding=None):
     class TestInstanceClass:
         def div(self, x, y):
             return x // y
@@ -306,6 +322,7 @@
     if not requestHandler:
         requestHandler = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
     serv = MyXMLRPCServer(("localhost", 0), requestHandler,
+                          encoding=encoding,
                           logRequests=False, bind_and_activate=False)
     try:
         serv.socket.settimeout(3)
@@ -322,6 +339,7 @@
         serv.register_multicall_functions()
         serv.register_function(pow)
         serv.register_function(lambda x,y: x+y, 'add')
+        serv.register_function(lambda x: x, test_support.u(r't\xea\u0161t'))
         serv.register_function(my_function)
         serv.register_instance(TestInstanceClass())
         evt.set()
@@ -463,9 +481,10 @@
                 # protocol error; provide additional information in test output
                 self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
 
+    @test_support.requires_unicode
     def test_nonascii(self):
-        start_string = 'P\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}t'
-        end_string = 'h\N{LATIN SMALL LETTER O WITH HORN}n'
+        start_string = test_support.u(r'P\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}t')
+        end_string = test_support.u(r'h\N{LATIN SMALL LETTER O WITH HORN}n')
 
         try:
             p = xmlrpclib.ServerProxy(URL)
@@ -477,10 +496,38 @@
                 # protocol error; provide additional information in test output
                 self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
 
+    @test_support.requires_unicode
     def test_unicode_host(self):
         server = xmlrpclib.ServerProxy(u"http://%s:%d/RPC2"%(ADDR, PORT))
         self.assertEqual(server.add("a", u"\xe9"), u"a\xe9")
 
+    @test_support.requires_unicode
+    def test_client_encoding(self):
+        start_string = unichr(0x20ac)
+        end_string = unichr(0xa4)
+
+        try:
+            p = xmlrpclib.ServerProxy(URL, encoding='iso-8859-15')
+            self.assertEqual(p.add(start_string, end_string),
+                             start_string + end_string)
+        except (xmlrpclib.ProtocolError, socket.error) as e:
+            # ignore failures due to non-blocking socket unavailable errors.
+            if not is_unavailable_exception(e):
+                # protocol error; provide additional information in test output
+                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
+
+    @test_support.requires_unicode
+    def test_nonascii_methodname(self):
+        try:
+            p = xmlrpclib.ServerProxy(URL, encoding='iso-8859-15')
+            m = getattr(p, 't\xea\xa8t')
+            self.assertEqual(m(42), 42)
+        except (xmlrpclib.ProtocolError, socket.error) as e:
+            # ignore failures due to non-blocking socket unavailable errors.
+            if not is_unavailable_exception(e):
+                # protocol error; provide additional information in test output
+                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
+
     # [ch] The test 404 is causing lots of false alarms.
     def XXXtest_404(self):
         # send POST with httplib, it should return 404 header and
@@ -498,6 +545,7 @@
             p = xmlrpclib.ServerProxy(URL)
             meth = p.system.listMethods()
             expected_methods = set(['pow', 'div', 'my_function', 'add',
+                                    test_support.u(r't\xea\u0161t'),
                                     'system.listMethods', 'system.methodHelp',
                                     'system.methodSignature', 'system.multicall'])
             self.assertEqual(set(meth), expected_methods)
@@ -600,6 +648,27 @@
         conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye')
         conn.close()
 
+class SimpleServerEncodingTestCase(BaseServerTestCase):
+    @staticmethod
+    def threadFunc(evt, numrequests, requestHandler=None, encoding=None):
+        http_server(evt, numrequests, requestHandler, 'iso-8859-15')
+
+    @test_support.requires_unicode
+    def test_server_encoding(self):
+        start_string = unichr(0x20ac)
+        end_string = unichr(0xa4)
+
+        try:
+            p = xmlrpclib.ServerProxy(URL)
+            self.assertEqual(p.add(start_string, end_string),
+                             start_string + end_string)
+        except (xmlrpclib.ProtocolError, socket.error) as e:
+            # ignore failures due to non-blocking socket unavailable errors.
+            if not is_unavailable_exception(e):
+                # protocol error; provide additional information in test output
+                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
+
+
 class MultiPathServerTestCase(BaseServerTestCase):
     threadFunc = staticmethod(http_multi_server)
     request_count = 2
@@ -1032,6 +1101,7 @@
     xmlrpc_tests = [XMLRPCTestCase, HelperTestCase, DateTimeTestCase,
          BinaryTestCase, FaultTestCase, TransportSubclassTestCase]
     xmlrpc_tests.append(SimpleServerTestCase)
+    xmlrpc_tests.append(SimpleServerEncodingTestCase)
     xmlrpc_tests.append(KeepaliveServerTestCase1)
     xmlrpc_tests.append(KeepaliveServerTestCase2)
     xmlrpc_tests.append(GzipServerTestCase)
diff --git a/lib/python2.7/test/test_xpickle.py b/lib/python2.7/test/test_xpickle.py
index 95ad4eb..5cb7b25 100644
--- a/lib/python2.7/test/test_xpickle.py
+++ b/lib/python2.7/test/test_xpickle.py
@@ -169,6 +169,14 @@
                 u2 = self.loads(p)
                 self.assertEqual(u2, u)
 
+    # The ability to pickle recursive objects was added in 2.7.11 to fix
+    # a crash in CPickle (issue #892902).
+    test_recursive_list_subclass_and_inst = None
+    test_recursive_tuple_subclass_and_inst = None
+    test_recursive_dict_subclass_and_inst = None
+    test_recursive_set_and_inst = None
+    test_recursive_frozenset_and_inst = None
+
 
 # Test backwards compatibility with Python 2.4.
 class CPicklePython24Compat(AbstractCompatTests):
diff --git a/lib/python2.7/test/test_zipfile.py b/lib/python2.7/test/test_zipfile.py
index 71c6605..01750c1 100644
--- a/lib/python2.7/test/test_zipfile.py
+++ b/lib/python2.7/test/test_zipfile.py
@@ -778,6 +778,13 @@
     def requiresWriteAccess(self, path):
         if not os.access(path, os.W_OK):
             self.skipTest('requires write access to the installed location')
+        filename = os.path.join(path, 'test_zipfile.try')
+        try:
+            fd = os.open(filename, os.O_WRONLY | os.O_CREAT)
+            os.close(fd)
+        except Exception:
+            self.skipTest('requires write access to the installed location')
+        unlink(filename)
 
     def test_write_pyfile(self):
         self.requiresWriteAccess(os.path.dirname(__file__))
diff --git a/lib/python2.7/test/test_zipfile64.py b/lib/python2.7/test/test_zipfile64.py
index a87baaa..151baf2 100644
--- a/lib/python2.7/test/test_zipfile64.py
+++ b/lib/python2.7/test/test_zipfile64.py
@@ -79,15 +79,19 @@
     def testStored(self):
         # Try the temp file first.  If we do TESTFN2 first, then it hogs
         # gigabytes of disk space for the duration of the test.
-        for f in TemporaryFile(), TESTFN2:
+        with TemporaryFile() as f:
             self.zipTest(f, zipfile.ZIP_STORED)
+            self.assertFalse(f.closed)
+        self.zipTest(TESTFN2, zipfile.ZIP_STORED)
 
-    if zlib:
-        def testDeflated(self):
-            # Try the temp file first.  If we do TESTFN2 first, then it hogs
-            # gigabytes of disk space for the duration of the test.
-            for f in TemporaryFile(), TESTFN2:
-                self.zipTest(f, zipfile.ZIP_DEFLATED)
+    @unittest.skipUnless(zlib, "requires zlib")
+    def testDeflated(self):
+        # Try the temp file first.  If we do TESTFN2 first, then it hogs
+        # gigabytes of disk space for the duration of the test.
+        with TemporaryFile() as f:
+            self.zipTest(f, zipfile.ZIP_DEFLATED)
+            self.assertFalse(f.closed)
+        self.zipTest(TESTFN2, zipfile.ZIP_DEFLATED)
 
     def tearDown(self):
         for fname in TESTFN, TESTFN2:
diff --git a/lib/python2.7/test/test_zlib.py b/lib/python2.7/test/test_zlib.py
index fb62081..c8fc985 100644
--- a/lib/python2.7/test/test_zlib.py
+++ b/lib/python2.7/test/test_zlib.py
@@ -1,6 +1,7 @@
 import unittest
 from test.test_support import TESTFN, run_unittest, import_module, unlink, requires
 import binascii
+import pickle
 import random
 from test.test_support import precisionbigmemtest, _1G, _4G
 import sys
@@ -502,6 +503,16 @@
         d.flush()
         self.assertRaises(ValueError, d.copy)
 
+    def test_compresspickle(self):
+        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+            with self.assertRaises((TypeError, pickle.PicklingError)):
+                pickle.dumps(zlib.compressobj(zlib.Z_BEST_COMPRESSION), proto)
+
+    def test_decompresspickle(self):
+        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+            with self.assertRaises((TypeError, pickle.PicklingError)):
+                pickle.dumps(zlib.decompressobj(), proto)
+
     # Memory use of the following functions takes into account overallocation
 
     @precisionbigmemtest(size=_1G + 1024 * 1024, memuse=3)
diff --git a/lib/python2.7/textwrap.py b/lib/python2.7/textwrap.py
index e755860..5c2e4fa 100644
--- a/lib/python2.7/textwrap.py
+++ b/lib/python2.7/textwrap.py
@@ -403,11 +403,15 @@
         elif margin.startswith(indent):
             margin = indent
 
-        # Current line and previous winner have no common whitespace:
-        # there is no margin.
+        # Find the largest common whitespace between current line and previous
+        # winner.
         else:
-            margin = ""
-            break
+            for i, (x, y) in enumerate(zip(margin, indent)):
+                if x != y:
+                    margin = margin[:i]
+                    break
+            else:
+                margin = margin[:len(indent)]
 
     # sanity check (testing/debugging only)
     if 0 and margin:
diff --git a/lib/python2.7/threading.py b/lib/python2.7/threading.py
index 27a5511..527f20a 100644
--- a/lib/python2.7/threading.py
+++ b/lib/python2.7/threading.py
@@ -565,7 +565,7 @@
 
     def _reset_internal_locks(self):
         # private!  called by Thread._reset_internal_locks by _after_fork()
-        self.__cond.__init__()
+        self.__cond.__init__(Lock())
 
     def isSet(self):
         'Return true if and only if the internal flag is true.'
@@ -580,12 +580,9 @@
         that call wait() once the flag is true will not block at all.
 
         """
-        self.__cond.acquire()
-        try:
+        with self.__cond:
             self.__flag = True
             self.__cond.notify_all()
-        finally:
-            self.__cond.release()
 
     def clear(self):
         """Reset the internal flag to false.
@@ -594,11 +591,8 @@
         set the internal flag to true again.
 
         """
-        self.__cond.acquire()
-        try:
+        with self.__cond:
             self.__flag = False
-        finally:
-            self.__cond.release()
 
     def wait(self, timeout=None):
         """Block until the internal flag is true.
@@ -615,13 +609,10 @@
         True except if a timeout is given and the operation times out.
 
         """
-        self.__cond.acquire()
-        try:
+        with self.__cond:
             if not self.__flag:
                 self.__cond.wait(timeout)
             return self.__flag
-        finally:
-            self.__cond.release()
 
 # Helper to generate new thread names
 _counter = _count().next
diff --git a/lib/python2.7/timeit.py b/lib/python2.7/timeit.py
index a8992f8..bf0301e 100755
--- a/lib/python2.7/timeit.py
+++ b/lib/python2.7/timeit.py
@@ -78,7 +78,7 @@
 # in Timer.__init__() depend on setup being indented 4 spaces and stmt
 # being indented 8 spaces.
 template = """
-def inner(_it, _timer):
+def inner(_it, _timer%(init)s):
     %(setup)s
     _t0 = _timer()
     for _i in _it:
@@ -132,9 +132,10 @@
             stmt = reindent(stmt, 8)
             if isinstance(setup, basestring):
                 setup = reindent(setup, 4)
-                src = template % {'stmt': stmt, 'setup': setup}
+                src = template % {'stmt': stmt, 'setup': setup, 'init': ''}
             elif hasattr(setup, '__call__'):
-                src = template % {'stmt': stmt, 'setup': '_setup()'}
+                src = template % {'stmt': stmt, 'setup': '_setup()',
+                                  'init': ', _setup=_setup'}
                 ns['_setup'] = setup
             else:
                 raise ValueError("setup is neither a string nor callable")
diff --git a/lib/python2.7/tokenize.py b/lib/python2.7/tokenize.py
index 661ddeb..d426cd2 100644
--- a/lib/python2.7/tokenize.py
+++ b/lib/python2.7/tokenize.py
@@ -198,6 +198,8 @@
 
     def untokenize(self, iterable):
         it = iter(iterable)
+        indents = []
+        startline = False
         for t in it:
             if len(t) == 2:
                 self.compat(t, it)
@@ -205,6 +207,21 @@
             tok_type, token, start, end, line = t
             if tok_type == ENDMARKER:
                 break
+            if tok_type == INDENT:
+                indents.append(token)
+                continue
+            elif tok_type == DEDENT:
+                indents.pop()
+                self.prev_row, self.prev_col = end
+                continue
+            elif tok_type in (NEWLINE, NL):
+                startline = True
+            elif startline and indents:
+                indent = indents[-1]
+                if start[1] >= len(indent):
+                    self.tokens.append(indent)
+                    self.prev_col = len(indent)
+                startline = False
             self.add_whitespace(start)
             self.tokens.append(token)
             self.prev_row, self.prev_col = end
diff --git a/lib/python2.7/unittest/case.py b/lib/python2.7/unittest/case.py
index 6bbc55f..644fe5b 100644
--- a/lib/python2.7/unittest/case.py
+++ b/lib/python2.7/unittest/case.py
@@ -127,8 +127,6 @@
                      (expected_regexp.pattern, str(exc_value)))
         return True
 
-def _sentinel(*args, **kwargs):
-    raise AssertionError('Should never be called')
 
 class TestCase(object):
     """A class whose instances are single test cases.
@@ -445,7 +443,7 @@
             return  '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))
 
 
-    def assertRaises(self, excClass, callableObj=_sentinel, *args, **kwargs):
+    def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
         """Fail unless an exception of class excClass is raised
            by callableObj when invoked with arguments args and keyword
            arguments kwargs. If a different type of exception is
@@ -453,7 +451,7 @@
            deemed to have suffered an error, exactly as for an
            unexpected exception.
 
-           If called with callableObj omitted, will return a
+           If called with callableObj omitted or None, will return a
            context object used like this::
 
                 with self.assertRaises(SomeException):
@@ -469,7 +467,7 @@
                self.assertEqual(the_exception.error_code, 3)
         """
         context = _AssertRaisesContext(excClass, self)
-        if callableObj is _sentinel:
+        if callableObj is None:
             return context
         with context:
             callableObj(*args, **kwargs)
@@ -975,7 +973,7 @@
             self.fail(self._formatMessage(msg, standardMsg))
 
     def assertRaisesRegexp(self, expected_exception, expected_regexp,
-                           callable_obj=_sentinel, *args, **kwargs):
+                           callable_obj=None, *args, **kwargs):
         """Asserts that the message in a raised exception matches a regexp.
 
         Args:
@@ -989,7 +987,7 @@
         if expected_regexp is not None:
             expected_regexp = re.compile(expected_regexp)
         context = _AssertRaisesContext(expected_exception, self, expected_regexp)
-        if callable_obj is _sentinel:
+        if callable_obj is None:
             return context
         with context:
             callable_obj(*args, **kwargs)
diff --git a/lib/python2.7/unittest/main.py b/lib/python2.7/unittest/main.py
index b253679..ca99ac6 100644
--- a/lib/python2.7/unittest/main.py
+++ b/lib/python2.7/unittest/main.py
@@ -174,7 +174,7 @@
                               action='store_true')
         if self.catchbreak != False:
             parser.add_option('-c', '--catch', dest='catchbreak', default=False,
-                              help='Catch ctrl-C and display results so far',
+                              help='Catch Ctrl-C and display results so far',
                               action='store_true')
         if self.buffer != False:
             parser.add_option('-b', '--buffer', dest='buffer', default=False,
diff --git a/lib/python2.7/unittest/test/test_case.py b/lib/python2.7/unittest/test/test_case.py
index 4c2d1f9..7658189 100644
--- a/lib/python2.7/unittest/test/test_case.py
+++ b/lib/python2.7/unittest/test/test_case.py
@@ -967,9 +967,6 @@
         # Failure when no exception is raised
         with self.assertRaises(self.failureException):
             self.assertRaises(ExceptionMock, lambda: 0)
-        # Failure when the function is None
-        with self.assertRaises(TypeError):
-            self.assertRaises(ExceptionMock, None)
         # Failure when another exception is raised
         with self.assertRaises(ExceptionMock):
             self.assertRaises(ValueError, Stub)
@@ -1008,8 +1005,6 @@
         self.assertRaisesRegexp(ExceptionMock, re.compile('expect$'), Stub)
         self.assertRaisesRegexp(ExceptionMock, 'expect$', Stub)
         self.assertRaisesRegexp(ExceptionMock, u'expect$', Stub)
-        with self.assertRaises(TypeError):
-            self.assertRaisesRegexp(ExceptionMock, 'expect$', None)
 
     def testAssertNotRaisesRegexp(self):
         self.assertRaisesRegexp(
diff --git a/lib/python2.7/urllib.py b/lib/python2.7/urllib.py
index ccb0574..0cb3df9 100644
--- a/lib/python2.7/urllib.py
+++ b/lib/python2.7/urllib.py
@@ -629,18 +629,20 @@
     def http_error_302(self, url, fp, errcode, errmsg, headers, data=None):
         """Error 302 -- relocated (temporarily)."""
         self.tries += 1
-        if self.maxtries and self.tries >= self.maxtries:
-            if hasattr(self, "http_error_500"):
-                meth = self.http_error_500
-            else:
-                meth = self.http_error_default
+        try:
+            if self.maxtries and self.tries >= self.maxtries:
+                if hasattr(self, "http_error_500"):
+                    meth = self.http_error_500
+                else:
+                    meth = self.http_error_default
+                return meth(url, fp, 500,
+                            "Internal Server Error: Redirect Recursion",
+                            headers)
+            result = self.redirect_internal(url, fp, errcode, errmsg,
+                                            headers, data)
+            return result
+        finally:
             self.tries = 0
-            return meth(url, fp, 500,
-                        "Internal Server Error: Redirect Recursion", headers)
-        result = self.redirect_internal(url, fp, errcode, errmsg, headers,
-                                        data)
-        self.tries = 0
-        return result
 
     def redirect_internal(self, url, fp, errcode, errmsg, headers, data):
         if 'location' in headers:
diff --git a/lib/python2.7/urllib2.py b/lib/python2.7/urllib2.py
index 9277b1d..93a3350 100644
--- a/lib/python2.7/urllib2.py
+++ b/lib/python2.7/urllib2.py
@@ -248,11 +248,9 @@
         # methods getting called in a non-standard order.  this may be
         # too complicated and/or unnecessary.
         # XXX should the __r_XXX attributes be public?
-        if attr[:12] == '_Request__r_':
-            name = attr[12:]
-            if hasattr(Request, 'get_' + name):
-                getattr(self, 'get_' + name)()
-                return getattr(self, attr)
+        if attr in ('_Request__r_type', '_Request__r_host'):
+            getattr(self, 'get_' + attr[12:])()
+            return self.__dict__[attr]
         raise AttributeError, attr
 
     def get_method(self):
@@ -1073,6 +1071,9 @@
         elif algorithm == 'SHA':
             H = lambda x: hashlib.sha1(x).hexdigest()
         # XXX MD5-sess
+        else:
+            raise ValueError("Unsupported digest authentication "
+                             "algorithm %r" % algorithm.lower())
         KD = lambda s, d: H("%s:%s" % (s, d))
         return H, KD
 
diff --git a/lib/python2.7/uuid.py b/lib/python2.7/uuid.py
index 0f0d8c1..7432032 100644
--- a/lib/python2.7/uuid.py
+++ b/lib/python2.7/uuid.py
@@ -44,6 +44,8 @@
     UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
 """
 
+import os
+
 __author__ = 'Ka-Ping Yee <ping@zesty.ca>'
 
 RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
@@ -438,23 +440,25 @@
 # Thanks to Thomas Heller for ctypes and for his help with its use here.
 
 # If ctypes is available, use it to find system routines for UUID generation.
-_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
+_uuid_generate_time = _UuidCreate = None
 try:
     import ctypes, ctypes.util
+    import sys
 
     # The uuid_generate_* routines are provided by libuuid on at least
     # Linux and FreeBSD, and provided by libc on Mac OS X.
-    for libname in ['uuid', 'c']:
+    _libnames = ['uuid']
+    if not sys.platform.startswith('win'):
+        _libnames.append('c')
+    for libname in _libnames:
         try:
             lib = ctypes.CDLL(ctypes.util.find_library(libname))
         except:
             continue
-        if hasattr(lib, 'uuid_generate_random'):
-            _uuid_generate_random = lib.uuid_generate_random
         if hasattr(lib, 'uuid_generate_time'):
             _uuid_generate_time = lib.uuid_generate_time
-            if _uuid_generate_random is not None:
-                break  # found everything we were looking for
+            break
+    del _libnames
 
     # The uuid_generate_* functions are broken on MacOS X 10.5, as noted
     # in issue #8621 the function generates the same sequence of values
@@ -463,11 +467,10 @@
     #
     # Assume that the uuid_generate functions are broken from 10.5 onward,
     # the test can be adjusted when a later version is fixed.
-    import sys
     if sys.platform == 'darwin':
         import os
         if int(os.uname()[2].split('.')[0]) >= 9:
-            _uuid_generate_random = _uuid_generate_time = None
+            _uuid_generate_time = None
 
     # On Windows prior to 2000, UuidCreate gives a UUID containing the
     # hardware address.  On Windows 2000 and later, UuidCreate makes a
@@ -578,21 +581,7 @@
 
 def uuid4():
     """Generate a random UUID."""
-
-    # When the system provides a version-4 UUID generator, use it.
-    if _uuid_generate_random:
-        _buffer = ctypes.create_string_buffer(16)
-        _uuid_generate_random(_buffer)
-        return UUID(bytes=_buffer.raw)
-
-    # Otherwise, get randomness from urandom or the 'random' module.
-    try:
-        import os
-        return UUID(bytes=os.urandom(16), version=4)
-    except:
-        import random
-        bytes = [chr(random.randrange(256)) for i in range(16)]
-        return UUID(bytes=bytes, version=4)
+    return UUID(bytes=os.urandom(16), version=4)
 
 def uuid5(namespace, name):
     """Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
diff --git a/lib/python2.7/warnings.py b/lib/python2.7/warnings.py
index fbec94b..b0d53aa 100644
--- a/lib/python2.7/warnings.py
+++ b/lib/python2.7/warnings.py
@@ -31,7 +31,7 @@
             return
     try:
         file.write(formatwarning(message, category, filename, lineno, line))
-    except IOError:
+    except (IOError, UnicodeError):
         pass # the file (probably stderr) is invalid - this warning gets lost.
 # Keep a working version around in case the deprecation of the old API is
 # triggered.
@@ -39,11 +39,29 @@
 
 def formatwarning(message, category, filename, lineno, line=None):
     """Function to format a warning the standard way."""
-    s =  "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
+    try:
+        unicodetype = unicode
+    except NameError:
+        unicodetype = ()
+    try:
+        message = str(message)
+    except UnicodeEncodeError:
+        pass
+    s =  "%s: %s: %s\n" % (lineno, category.__name__, message)
     line = linecache.getline(filename, lineno) if line is None else line
     if line:
         line = line.strip()
+        if isinstance(s, unicodetype) and isinstance(line, str):
+            line = unicode(line, 'latin1')
         s += "  %s\n" % line
+    if isinstance(s, unicodetype) and isinstance(filename, str):
+        enc = sys.getfilesystemencoding()
+        if enc:
+            try:
+                filename = unicode(filename, enc)
+            except UnicodeDecodeError:
+                pass
+    s = "%s:%s" % (filename, s)
     return s
 
 def filterwarnings(action, message="", category=Warning, module="", lineno=0,
diff --git a/lib/python2.7/weakref.py b/lib/python2.7/weakref.py
index 787c885..ca37f87 100644
--- a/lib/python2.7/weakref.py
+++ b/lib/python2.7/weakref.py
@@ -44,7 +44,14 @@
     # objects are unwrapped on the way out, and we always wrap on the
     # way in).
 
-    def __init__(self, *args, **kw):
+    def __init__(*args, **kw):
+        if not args:
+            raise TypeError("descriptor '__init__' of 'WeakValueDictionary' "
+                            "object needs an argument")
+        self = args[0]
+        args = args[1:]
+        if len(args) > 1:
+            raise TypeError('expected at most 1 arguments, got %d' % len(args))
         def remove(wr, selfref=ref(self)):
             self = selfref()
             if self is not None:
@@ -214,7 +221,15 @@
         else:
             return wr()
 
-    def update(self, dict=None, **kwargs):
+    def update(*args, **kwargs):
+        if not args:
+            raise TypeError("descriptor 'update' of 'WeakValueDictionary' "
+                            "object needs an argument")
+        self = args[0]
+        args = args[1:]
+        if len(args) > 1:
+            raise TypeError('expected at most 1 arguments, got %d' % len(args))
+        dict = args[0] if args else None
         if self._pending_removals:
             self._commit_removals()
         d = self.data
diff --git a/lib/python2.7/wsgiref/validate.py b/lib/python2.7/wsgiref/validate.py
index 04a893d..c327812 100644
--- a/lib/python2.7/wsgiref/validate.py
+++ b/lib/python2.7/wsgiref/validate.py
@@ -329,7 +329,7 @@
 
     # @@: these need filling out:
     if environ['REQUEST_METHOD'] not in (
-        'GET', 'HEAD', 'POST', 'OPTIONS','PUT','DELETE','TRACE'):
+        'GET', 'HEAD', 'POST', 'OPTIONS', 'PATCH', 'PUT', 'DELETE', 'TRACE'):
         warnings.warn(
             "Unknown REQUEST_METHOD: %r" % environ['REQUEST_METHOD'],
             WSGIWarning)
diff --git a/lib/python2.7/xml/dom/minicompat.py b/lib/python2.7/xml/dom/minicompat.py
index de4cb4f..266a7f4 100644
--- a/lib/python2.7/xml/dom/minicompat.py
+++ b/lib/python2.7/xml/dom/minicompat.py
@@ -65,10 +65,10 @@
     length = property(_get_length, _set_length,
                       doc="The number of nodes in the NodeList.")
 
-    def __getstate__(self):
-        return list(self)
-
+    # For backward compatibility
     def __setstate__(self, state):
+        if state is None:
+            state = []
         self[:] = state
 
 
diff --git a/lib/python2.7/xml/etree/ElementTree.py b/lib/python2.7/xml/etree/ElementTree.py
index 9f3e75d..c6e3d39 100644
--- a/lib/python2.7/xml/etree/ElementTree.py
+++ b/lib/python2.7/xml/etree/ElementTree.py
@@ -1198,9 +1198,14 @@
     if not hasattr(source, "read"):
         source = open(source, "rb")
         close_source = True
-    if not parser:
-        parser = XMLParser(target=TreeBuilder())
-    return _IterParseIterator(source, events, parser, close_source)
+    try:
+        if not parser:
+            parser = XMLParser(target=TreeBuilder())
+        return _IterParseIterator(source, events, parser, close_source)
+    except:
+        if close_source:
+            source.close()
+        raise
 
 class _IterParseIterator(object):
 
@@ -1252,34 +1257,40 @@
                 raise ValueError("unknown event %r" % event)
 
     def next(self):
-        while 1:
-            try:
-                item = self._events[self._index]
-                self._index += 1
-                return item
-            except IndexError:
-                pass
-            if self._error:
-                e = self._error
-                self._error = None
-                raise e
-            if self._parser is None:
-                self.root = self._root
-                if self._close_file:
-                    self._file.close()
-                raise StopIteration
-            # load event buffer
-            del self._events[:]
-            self._index = 0
-            data = self._file.read(16384)
-            if data:
+        try:
+            while 1:
                 try:
-                    self._parser.feed(data)
-                except SyntaxError as exc:
-                    self._error = exc
-            else:
-                self._root = self._parser.close()
-                self._parser = None
+                    item = self._events[self._index]
+                    self._index += 1
+                    return item
+                except IndexError:
+                    pass
+                if self._error:
+                    e = self._error
+                    self._error = None
+                    raise e
+                if self._parser is None:
+                    self.root = self._root
+                    break
+                # load event buffer
+                del self._events[:]
+                self._index = 0
+                data = self._file.read(16384)
+                if data:
+                    try:
+                        self._parser.feed(data)
+                    except SyntaxError as exc:
+                        self._error = exc
+                else:
+                    self._root = self._parser.close()
+                    self._parser = None
+        except:
+            if self._close_file:
+                self._file.close()
+            raise
+        if self._close_file:
+            self._file.close()
+        raise StopIteration
 
     def __iter__(self):
         return self
diff --git a/lib/python2.7/xmlrpclib.py b/lib/python2.7/xmlrpclib.py
index db185a6..e0e399c 100644
--- a/lib/python2.7/xmlrpclib.py
+++ b/lib/python2.7/xmlrpclib.py
@@ -393,7 +393,7 @@
         elif datetime and isinstance(other, datetime.datetime):
             s = self.value
             o = other.strftime("%Y%m%dT%H:%M:%S")
-        elif isinstance(other, (str, unicode)):
+        elif isinstance(other, basestring):
             s = self.value
             o = other
         elif hasattr(other, "timetuple"):
@@ -703,9 +703,8 @@
 
     if unicode:
         def dump_unicode(self, value, write, escape=escape):
-            value = value.encode(self.encoding)
             write("<value><string>")
-            write(escape(value))
+            write(escape(value).encode(self.encoding, 'xmlcharrefreplace'))
             write("</string></value>\n")
         dispatch[UnicodeType] = dump_unicode
 
@@ -732,12 +731,13 @@
         write("<value><struct>\n")
         for k, v in value.items():
             write("<member>\n")
-            if type(k) is not StringType:
-                if unicode and type(k) is UnicodeType:
-                    k = k.encode(self.encoding)
-                else:
-                    raise TypeError, "dictionary key must be string"
-            write("<name>%s</name>\n" % escape(k))
+            if type(k) is StringType:
+                k = escape(k)
+            elif unicode and type(k) is UnicodeType:
+                k = escape(k).encode(self.encoding, 'xmlcharrefreplace')
+            else:
+                raise TypeError, "dictionary key must be string"
+            write("<name>%s</name>\n" % k)
             dump(v, write)
             write("</member>\n")
         write("</struct></value>\n")
@@ -1099,7 +1099,7 @@
     if methodname:
         # a method call
         if not isinstance(methodname, StringType):
-            methodname = methodname.encode(encoding)
+            methodname = methodname.encode(encoding, 'xmlcharrefreplace')
         data = (
             xmlheader,
             "<methodCall>\n"
@@ -1560,7 +1560,7 @@
                  allow_none=0, use_datetime=0, context=None):
         # establish a "logical" server connection
 
-        if isinstance(uri, unicode):
+        if unicode and isinstance(uri, unicode):
             uri = uri.encode('ISO-8859-1')
 
         # get the url
diff --git a/lib/python2.7/zipfile.py b/lib/python2.7/zipfile.py
index b77e6c8..3ab66ce 100644
--- a/lib/python2.7/zipfile.py
+++ b/lib/python2.7/zipfile.py
@@ -1134,7 +1134,9 @@
             arcname += '/'
         zinfo = ZipInfo(arcname, date_time)
         zinfo.external_attr = (st[0] & 0xFFFF) << 16L      # Unix attributes
-        if compress_type is None:
+        if isdir:
+            zinfo.compress_type = ZIP_STORED
+        elif compress_type is None:
             zinfo.compress_type = self.compression
         else:
             zinfo.compress_type = compress_type
diff --git a/revisions b/revisions
new file mode 100644
index 0000000..0d51143
--- /dev/null
+++ b/revisions
@@ -0,0 +1,4 @@
+external/lldb-utils 995307d34027ca4082ffc37d09448dee81814ad4
+external/python 1c73ed5422f31dc7870929a53ba82f8de1039e19
+prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 2ccb38af8c940f1feef62ff5986f4bbc5d899e66
+prebuilts/clang/linux-x86/host/3.6 b660e8f76f1543abf50ebfbdb8d10112d93fec3d
diff --git a/share/man/man1/python2.7.1 b/share/man/man1/python2.7.1
index d87902f..054fbb5 100644
--- a/share/man/man1/python2.7.1
+++ b/share/man/man1/python2.7.1
@@ -289,9 +289,9 @@
 from that file;
 when called with
 .B \-c
-.I command,
+.IR command ,
 it executes the Python statement(s) given as
-.I command.
+.IR command .
 Here
 .I command
 may contain multiple statements separated by newlines.
@@ -301,7 +301,7 @@
 .PP
 If available, the script name and additional arguments thereafter are
 passed to the script in the Python variable
-.I sys.argv ,
+.IR sys.argv ,
 which is a list of strings (you must first
 .I import sys
 to be able to access it).
@@ -315,14 +315,14 @@
 .I '-c'.
 Note that options interpreted by the Python interpreter itself
 are not placed in
-.I sys.argv.
+.IR sys.argv .
 .PP
 In interactive mode, the primary prompt is `>>>'; the second prompt
 (which appears when a command is not complete) is `...'.
 The prompts can be changed by assignment to
 .I sys.ps1
 or
-.I sys.ps2.
+.IR sys.ps2 .
 The interpreter quits when it reads an EOF at a prompt.
 When an unhandled exception occurs, a stack trace is printed and
 control returns to the primary prompt; in non-interactive mode, the
@@ -381,7 +381,7 @@
 inserted in the path in front of $PYTHONPATH.
 The search path can be manipulated from within a Python program as the
 variable
-.I sys.path .
+.IR sys.path .
 .IP PYTHONSTARTUP
 If this is the name of a readable file, the Python commands in that
 file are executed before the first prompt is displayed in interactive
@@ -452,7 +452,7 @@
 the value 0 will lead to the same hash values as when hash randomization is
 disabled.
 .SH AUTHOR
-The Python Software Foundation: https://www.python.org/psf
+The Python Software Foundation: https://www.python.org/psf/
 .SH INTERNET RESOURCES
 Main website:  https://www.python.org/
 .br