initial drop of setuptools v39.1.0

Test: N/A
Bug: b/79751992
Change-Id: Iccb41bbd58f7a6af510957966c5d893c2875e4ee
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..3994a21
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,29 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+python_library {
+    name: "py-setuptools",
+    host_supported: true,
+    srcs: [
+        "pkg_resources/__init__.py",
+        "pkg_resources/py31compat.py",
+        "pkg_resources/extern/**/*.py",
+        "pkg_resources/_vendor/**/*.py",
+    ],
+    version: {
+        py2: {
+            enabled: true,
+        },
+    },
+}
diff --git a/CHANGES.rst b/CHANGES.rst
new file mode 100644
index 0000000..d8b5b49
--- /dev/null
+++ b/CHANGES.rst
@@ -0,0 +1,3660 @@
+v39.1.0
+-------
+
+* #1340: Update all PyPI URLs to reflect the switch to the
+  new Warehouse codebase.
+* #1337: In ``pkg_resources``, now support loading resources
+  for modules loaded by the ``SourcelessFileLoader``.
+* #1332: Silence spurious wheel related warnings on Windows.
+
+v39.0.1
+-------
+
+* #1297: Restore Unicode handling for Maintainer fields in
+  metadata.
+
+v39.0.0
+-------
+
+* #1296: Setuptools now vendors its own direct dependencies, no
+  longer relying on the dependencies as vendored by pkg_resources.
+
+* #296: Removed long-deprecated support for iteration on
+  Version objects as returned by ``pkg_resources.parse_version``.
+  Removed the ``SetuptoolsVersion`` and
+  ``SetuptoolsLegacyVersion`` names as well. They should not
+  have been used, but if they were, replace with
+  ``Version`` and ``LegacyVersion`` from ``packaging.version``.
+
+v38.7.0
+-------
+
+* #1288: Add support for maintainer in PKG-INFO.
+
+v38.6.1
+-------
+
+* #1292: Avoid generating ``Provides-Extra`` in metadata when
+  no extra is present (but environment markers are).
+
+v38.6.0
+-------
+
+* #1286: Add support for Metadata 2.1 (PEP 566).
+
+v38.5.2
+-------
+
+* #1285: Fixed RuntimeError in pkg_resources.parse_requirements
+  on Python 3.7 (stemming from PEP 479).
+
+v38.5.1
+-------
+
+* #1271: Revert to Cython legacy ``build_ext`` behavior for
+  compatibility.
+
+v38.5.0
+-------
+
+* #1229: Expand imports in ``build_ext`` to refine detection of
+  Cython availability.
+
+* #1270: When Cython is available, ``build_ext`` now uses the
+  new_build_ext.
+
+v38.4.1
+-------
+
+* #1257: In bdist_egg.scan_module, fix ValueError on Python 3.7.
+
+v38.4.0
+-------
+
+* #1231: Removed warning when PYTHONDONTWRITEBYTECODE is enabled.
+
+v38.3.0
+-------
+
+* #1210: Add support for PEP 345 Project-URL metadata.
+* #1207: Add support for ``long_description_content_type`` to setup.cfg
+  declarative config as intended and documented.
+
+v38.2.5
+-------
+
+* #1232: Fix trailing slash handling in ``pkg_resources.ZipProvider``.
+
+v38.2.4
+-------
+
+* #1220: Fix `data_files` handling when installing from wheel.
+
+v38.2.3
+-------
+
+* fix Travis' Python 3.3 job.
+
+v38.2.2
+-------
+
+* #1214: fix handling of namespace packages when installing
+  from a wheel.
+
+v38.2.1
+-------
+
+* #1212: fix encoding handling of metadata when installing
+  from a wheel.
+
+v38.2.0
+-------
+
+* #1200: easy_install now supports installing from wheels:
+  they will be installed as standalone unzipped eggs.
+
+v38.1.0
+-------
+
+* #1208: Improve error message when failing to locate scripts
+  in egg-info metadata.
+
+v38.0.0
+-------
+
+* #458: In order to support deterministic builds, Setuptools no
+  longer allows packages to declare ``install_requires`` as
+  unordered sequences (sets or dicts).
+
+v37.0.0
+-------
+
+* #878: Drop support for Python 2.6. Python 2.6 users should
+  rely on 'setuptools < 37dev'.
+
+v36.8.0
+-------
+
+* #1190: In SSL support for package index operations, use SNI
+  where available.
+
+v36.7.3
+-------
+
+* #1175: Bug fixes to ``build_meta`` module.
+
+v36.7.2
+-------
+
+* #701: Fixed duplicate test discovery on Python 3.
+
+v36.7.1
+-------
+
+* #1193: Avoid test failures in bdist_egg when
+  PYTHONDONTWRITEBYTECODE is set.
+
+v36.7.0
+-------
+
+* #1054: Support ``setup_requires`` in ``setup.cfg`` files.
+
+v36.6.1
+-------
+
+* #1132: Removed redundant and costly serialization/parsing step
+  in ``EntryPoint.__init__``.
+
+* #844: ``bdist_egg --exclude-source-files`` now tested and works
+  on Python 3.
+
+v36.6.0
+-------
+
+* #1143: Added ``setuptools.build_meta`` module, an implementation
+  of PEP-517 for Setuptools-defined packages.
+
+* #1143: Added ``dist_info`` command for producing dist_info
+  metadata.
+
+v36.5.0
+-------
+
+* #170: When working with Mercurial checkouts, use Windows-friendly
+  syntax for suppressing output.
+
+* Inspired by #1134, performed substantial refactoring of
+  ``pkg_resources.find_on_path`` to facilitate an optimization
+  for paths with many non-version entries.
+
+v36.4.0
+-------
+
+* #1075: Add new ``Description-Content-Type`` metadata field. `See here for
+  documentation on how to use this field.
+  <https://packaging.python.org/specifications/#description-content-type>`_
+
+* #1068: Sort files and directories when building eggs for
+  deterministic order.
+
+* #196: Remove caching of easy_install command in fetch_build_egg.
+  Fixes issue where ``pytest-runner-N.N`` would satisfy the installation
+  of ``pytest``.
+
+* #1129: Fix working set dependencies handling when replacing conflicting
+  distributions (e.g. when using ``setup_requires`` with a conflicting
+  transitive dependency, fix #1124).
+
+* #1133: Improved handling of README file extensions and added
+  Markdown to the list of searched READMES.
+
+* #1135: Improve performance of pkg_resources import by not invoking
+  ``access`` or ``stat`` and using ``os.listdir`` instead.
+
+v36.3.0
+-------
+
+* #1131: Make possible using several files within ``file:`` directive
+  in metadata.long_description in ``setup.cfg``.
+
+v36.2.7
+-------
+
+* fix #1105: Fix handling of requirements with environment
+  markers when declared in ``setup.cfg`` (same treatment as
+  for #1081).
+
+v36.2.6
+-------
+
+* #462: Don't assume a directory is an egg by the ``.egg``
+  extension alone.
+
+v36.2.5
+-------
+
+* #1093: Fix test command handler with extras_require.
+* #1112, #1091, #1115: Now using Trusty containers in
+  Travis for CI and CD.
+
+v36.2.4
+-------
+
+* #1092: ``pkg_resources`` now uses ``inspect.getmro`` to
+  resolve classes in method resolution order.
+
+v36.2.3
+-------
+
+* #1102: Restore behavior for empty extras.
+
+v36.2.2
+-------
+
+* #1099: Revert commit a3ec721, restoring intended purpose of
+  extras as part of a requirement declaration.
+
+v36.2.1
+-------
+
+* fix #1086
+* fix #1087
+* support extras specifiers in install_requires requirements
+
+v36.2.0
+-------
+
+* #1081: Environment markers indicated in ``install_requires``
+  are now processed and treated as nameless ``extras_require``
+  with markers, allowing their metadata in requires.txt to be
+  correctly generated.
+
+* #1053: Tagged commits are now released using Travis-CI
+  build stages, meaning releases depend on passing tests on
+  all supported Python versions (Linux) and not just the latest
+  Python version.
+
+v36.1.1
+-------
+
+* #1083: Correct ``py31compat.makedirs`` to correctly honor
+  ``exist_ok`` parameter.
+* #1083: Also use makedirs compatibility throughout setuptools.
+
+v36.1.0
+-------
+
+* #1083: Avoid race condition on directory creation in
+  ``pkg_resources.ensure_directory``.
+
+* Removed deprecation of and restored support for
+  ``upload_docs`` command for sites other than PyPI.
+  Only warehouse is dropping support, but services like
+  `devpi <http://doc.devpi.net/latest/>`_ continue to
+  support docs built by setuptools' plugins. See
+  `this comment <https://bitbucket.org/hpk42/devpi/issues/388/support-rtd-model-for-building-uploading#comment-34292423>`_
+  for more context on the motivation for this change.
+
+v36.0.1
+-------
+
+* #1042: Fix import in py27compat module that still
+  referenced six directly, rather than through the externs
+  module (vendored packages hook).
+
+v36.0.0
+-------
+
+* #980 and others: Once again, Setuptools vendors all
+  of its dependencies. It seems to be the case that in
+  the Python ecosystem, all build tools must run without
+  any dependencies (build, runtime, or otherwise). At
+  such a point that a mechanism exists that allows
+  build tools to have dependencies, Setuptools will adopt
+  it.
+
+v35.0.2
+-------
+
+* #1015: Fix test failures on Python 3.7.
+
+* #1024: Add workaround for Jython #2581 in monkey module.
+
+v35.0.1
+-------
+
+* #992: Revert change introduced in v34.4.1, now
+  considered invalid.
+
+* #1016: Revert change introduced in v35.0.0 per #1014,
+  referencing #436. The approach had unintended
+  consequences, causing sdist installs to be missing
+  files.
+
+v35.0.0
+-------
+
+* #436: In egg_info.manifest_maker, no longer read
+  the file list from the manifest file, and instead
+  re-build it on each build. In this way, files removed
+  from the specification will not linger in the manifest.
+  As a result, any files manually added to the manifest
+  will be removed on subsequent egg_info invocations.
+  No projects should be manually adding files to the
+  manifest and should instead use MANIFEST.in or SCM
+  file finders to force inclusion of files in the manifest.
+
+v34.4.1
+-------
+
+* #1008: In MSVC support, use always the last version available for Windows SDK and UCRT SDK.
+
+* #1008: In MSVC support, fix "vcruntime140.dll" returned path with Visual Studio 2017.
+
+* #992: In msvc.msvc9_query_vcvarsall, ensure the
+  returned dicts have str values and not Unicode for
+  compatibility with os.environ.
+
+v34.4.0
+-------
+
+* #995: In MSVC support, add support for "Microsoft Visual Studio 2017" and "Microsoft Visual Studio Build Tools 2017".
+
+* #999 via #1007: Extend support for declarative package
+  config in a setup.cfg file to include the options
+  ``python_requires`` and ``py_modules``.
+
+v34.3.3
+-------
+
+* #967 (and #997): Explicitly import submodules of
+  packaging to account for environments where the imports
+  of those submodules is not implied by other behavior.
+
+v34.3.2
+-------
+
+* #993: Fix documentation upload by correcting
+  rendering of content-type in _build_multipart
+  on Python 3.
+
+v34.3.1
+-------
+
+* #988: Trap ``os.unlink`` same as ``os.remove`` in
+  ``auto_chmod`` error handler.
+
+* #983: Fixes to invalid escape sequence deprecations on
+  Python 3.6.
+
+v34.3.0
+-------
+
+* #941: In the upload command, if the username is blank,
+  default to ``getpass.getuser()``.
+
+* #971: Correct distutils findall monkeypatch to match
+  appropriate versions (namely Python 3.4.6).
+
+v34.2.0
+-------
+
+* #966: Add support for reading dist-info metadata and
+  thus locating Distributions from zip files.
+
+* #968: Allow '+' and '!' in egg fragments
+  so that it can take package names that contain
+  PEP 440 conforming version specifiers.
+
+v34.1.1
+-------
+
+* #953: More aggressively employ the compatibility issue
+  originally added in #706.
+
+v34.1.0
+-------
+
+* #930: ``build_info`` now accepts two new parameters
+  to optimize and customize the building of C libraries.
+
+v34.0.3
+-------
+
+* #947: Loosen restriction on the version of six required,
+  restoring compatibility with environments relying on
+  six 1.6.0 and later.
+
+v34.0.2
+-------
+
+* #882: Ensure extras are honored when building the
+  working set.
+* #913: Fix issue in develop if package directory has
+  a trailing slash.
+
+v34.0.1
+-------
+
+* #935: Fix glob syntax in graft.
+
+v34.0.0
+-------
+
+* #581: Instead of vendoring the growing list of
+  dependencies that Setuptools requires to function,
+  Setuptools now requires these dependencies just like
+  any other project. Unlike other projects, however,
+  Setuptools cannot rely on ``setup_requires`` to
+  demand the dependencies it needs to install because
+  its own machinery would be necessary to pull those
+  dependencies if not present (a bootstrapping problem).
+  As a result, Setuptools no longer supports self upgrade or
+  installation in the general case. Instead, users are
+  directed to use pip to install and upgrade using the
+  ``wheel`` distributions of setuptools.
+
+  Users are welcome to contrive other means to install
+  or upgrade Setuptools using other means, such as
+  pre-installing the Setuptools dependencies with pip
+  or a bespoke bootstrap tool, but such usage is not
+  recommended and is not supported.
+
+  As discovered in #940, not all versions of pip will
+  successfully install Setuptools from its pre-built
+  wheel. If you encounter issues with "No module named
+  six" or "No module named packaging", especially
+  following a line "Running setup.py egg_info for package
+  setuptools", then your pip is not new enough.
+
+  There's an additional issue in pip where setuptools
+  is upgraded concurrently with other source packages,
+  described in pip #4253. The proposed workaround is to
+  always upgrade Setuptools first prior to upgrading
+  other packages that would upgrade Setuptools.
+
+v33.1.1
+-------
+
+* #921: Correct issue where the certifi fallback was not
+  being reached on Windows.
+
+v33.1.0
+-------
+
+Installation via pip, as indicated in the `Python Packaging
+User's Guide <https://packaging.python.org/installing/>`_,
+is the officially-supported mechanism for installing
+Setuptools, and this recommendation is now explicit in the
+much more concise README.
+
+Other edits and tweaks were made to the documentation. The
+codebase is unchanged.
+
+v33.0.0
+-------
+
+* #619: Removed support for the ``tag_svn_revision``
+  distribution option. If Subversion tagging support is
+  still desired, consider adding the functionality to
+  setuptools_svn in setuptools_svn #2.
+
+v32.3.1
+-------
+
+* #866: Use ``dis.Bytecode`` on Python 3.4 and later in
+  ``setuptools.depends``.
+
+v32.3.0
+-------
+
+* #889: Backport proposed fix for disabling interpolation in
+  distutils.Distribution.parse_config_files.
+
+v32.2.0
+-------
+
+* #884: Restore support for running the tests under
+  `pytest-runner <https://github.com/pytest-dev/pytest-runner>`_
+  by ensuring that PYTHONPATH is honored in tests invoking
+  a subprocess.
+
+v32.1.3
+-------
+
+* #706: Add rmtree compatibility shim for environments where
+  rmtree fails when passed a unicode string.
+
+v32.1.2
+-------
+
+* #893: Only release sdist in zip format as warehouse now
+  disallows releasing two different formats.
+
+v32.1.1
+-------
+
+* #704: More selectively ensure that 'rmtree' is not invoked with
+  a byte string, enabling it to remove files that are non-ascii,
+  even on Python 2.
+
+* #712: In 'sandbox.run_setup', ensure that ``__file__`` is
+  always a ``str``, modeling the behavior observed by the
+  interpreter when invoking scripts and modules.
+
+v32.1.0
+-------
+
+* #891: In 'test' command on test failure, raise DistutilsError,
+  suppressing invocation of subsequent commands.
+
+v32.0.0
+-------
+
+* #890: Revert #849. ``global-exclude .foo`` will not match all
+  ``*.foo`` files any more. Package authors must add an explicit
+  wildcard, such as ``global-exclude *.foo``, to match all
+  ``.foo`` files. See #886, #849.
+
+v31.0.1
+-------
+
+* #885: Fix regression where 'pkg_resources._rebuild_mod_path'
+  would fail when a namespace package's '__path__' was not
+  a list with a sort attribute.
+
+v31.0.0
+-------
+
+* #250: Install '-nspkg.pth' files for packages installed
+  with 'setup.py develop'. These .pth files allow
+  namespace packages installed by pip or develop to
+  co-mingle. This change required the removal of the
+  change for #805 and pip #1924, introduced in 28.3.0 and implicated
+  in #870, but means that namespace packages not in a
+  site packages directory will no longer work on Python
+  earlier than 3.5, whereas before they would work on
+  Python not earlier than 3.3.
+
+v30.4.0
+-------
+
+* #879: For declarative config:
+
+  - read_configuration() now accepts ignore_option_errors argument. This allows scraping tools to read metadata without a need to download entire packages. E.g. we can gather some stats right from GitHub repos just by downloading setup.cfg.
+
+  - packages find: directive now supports fine tuning from a subsection. The same arguments as for find() are accepted.
+
+v30.3.0
+-------
+
+* #394 via #862: Added support for `declarative package
+  config in a setup.cfg file
+  <https://setuptools.readthedocs.io/en/latest/setuptools.html#configuring-setup-using-setup-cfg-files>`_.
+
+v30.2.1
+-------
+
+* #850: In test command, invoke unittest.main with
+  indication not to exit the process.
+
+v30.2.0
+-------
+
+* #854: Bump to vendored Packaging 16.8.
+
+v30.1.0
+-------
+
+* #846: Also trap 'socket.error' when opening URLs in
+  package_index.
+
+* #849: Manifest processing now matches the filename
+  pattern anywhere in the filename and not just at the
+  start. Restores behavior found prior to 28.5.0.
+
+v30.0.0
+-------
+
+* #864: Drop support for Python 3.2. Systems requiring
+  Python 3.2 support must use 'setuptools < 30'.
+
+* #825: Suppress warnings for single files.
+
+* #830 via #843: Once again restored inclusion of data
+  files to sdists, but now trap TypeError caused by
+  techniques employed rjsmin and similar.
+
+v29.0.1
+-------
+
+* #861: Re-release of v29.0.0 with the executable script
+  launchers bundled. Now, launchers are included by default
+  and users that want to disable this behavior must set the
+  environment variable
+  'SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES' to
+  a false value like "false" or "0".
+
+v29.0.0
+-------
+
+* #841: Drop special exception for packages invoking
+  win32com during the build/install process. See
+  Distribute #118 for history.
+
+v28.8.0
+-------
+
+* #629: Per the discussion, refine the sorting to use version
+  value order for more accurate detection of the latest
+  available version when scanning for packages. See also
+  #829.
+
+* #837: Rely on the config var "SO" for Python 3.3.0 only
+  when determining the ext filename.
+
+v28.7.1
+-------
+
+* #827: Update PyPI root for dependency links.
+
+* #833: Backed out changes from #830 as the implementation
+  seems to have problems in some cases.
+
+v28.7.0
+-------
+
+* #832: Moved much of the namespace package handling
+  functionality into a separate module for re-use in something
+  like #789.
+* #830: ``sdist`` command no longer suppresses the inclusion
+  of data files, re-aligning with the expectation of distutils
+  and addressing #274 and #521.
+
+v28.6.1
+-------
+
+* #816: Fix manifest file list order in tests.
+
+v28.6.0
+-------
+
+* #629: When scanning for packages, ``pkg_resources`` now
+  ignores empty egg-info directories and gives precedence to
+  packages whose versions are lexicographically greatest,
+  a rough approximation for preferring the latest available
+  version.
+
+v28.5.0
+-------
+
+* #810: Tests are now invoked with tox and not setup.py test.
+* #249 and #450 via #764: Avoid scanning the whole tree
+  when building the manifest. Also fixes a long-standing bug
+  where patterns in ``MANIFEST.in`` had implicit wildcard
+  matching. This caused ``global-exclude .foo`` to exclude
+  all ``*.foo`` files, but also ``global-exclude bar.py`` to
+  exclude ``foo_bar.py``.
+
+v28.4.0
+-------
+
+* #732: Now extras with a hyphen are honored per PEP 426.
+* #811: Update to pyparsing 2.1.10.
+* Updated ``setuptools.command.sdist`` to re-use most of
+  the functionality directly from ``distutils.command.sdist``
+  for the ``add_defaults`` method with strategic overrides.
+  See #750 for rationale.
+* #760 via #762: Look for certificate bundle where SUSE
+  Linux typically presents it. Use ``certifi.where()`` to locate
+  the bundle.
+
+v28.3.0
+-------
+
+* #809: In ``find_packages()``, restore support for excluding
+  a parent package without excluding a child package.
+
+* #805: Disable ``-nspkg.pth`` behavior on Python 3.3+ where
+  PEP-420 functionality is adequate. Fixes pip #1924.
+
+v28.1.0
+-------
+
+* #803: Bump certifi to 2016.9.26.
+
+v28.0.0
+-------
+
+* #733: Do not search excluded directories for packages.
+  This introduced a backwards incompatible change in ``find_packages()``
+  so that ``find_packages(exclude=['foo']) == []``, excluding subpackages of ``foo``.
+  Previously, ``find_packages(exclude=['foo']) == ['foo.bar']``,
+  even though the parent ``foo`` package was excluded.
+
+* #795: Bump certifi.
+
+* #719: Suppress decoding errors and instead log a warning
+  when metadata cannot be decoded.
+
+v27.3.1
+-------
+
+* #790: In MSVC monkeypatching, explicitly patch each
+  function by name in the target module instead of inferring
+  the module from the function's ``__module__``. Improves
+  compatibility with other packages that might have previously
+  patched distutils functions (i.e. NumPy).
+
+v27.3.0
+-------
+
+* #794: In test command, add installed eggs to PYTHONPATH
+  when invoking tests so that subprocesses will also have the
+  dependencies available. Fixes `tox 330
+  <https://github.com/tox-dev/tox/issues/330>`_.
+
+* #795: Update vendored pyparsing 2.1.9.
+
+v27.2.0
+-------
+
+* #520 and #513: Suppress ValueErrors in fixup_namespace_packages
+  when lookup fails.
+
+* Nicer, more consistent interfaces for msvc monkeypatching.
+
+v27.1.2
+-------
+
+* #779 via #781: Fix circular import.
+
+v27.1.1
+-------
+
+* #778: Fix MSVC monkeypatching.
+
+v27.1.0
+-------
+
+* Introduce the (private) ``monkey`` module to encapsulate
+  the distutils monkeypatching behavior.
+
+v27.0.0
+-------
+
+* Now use Warehouse by default for
+  ``upload``, patching ``distutils.config.PyPIRCCommand`` to
+  affect default behavior.
+
+  Any config in .pypirc should be updated to replace
+
+    https://pypi.python.org/pypi/
+
+  with
+
+    https://upload.pypi.org/legacy/
+
+  Similarly, any passwords stored in the keyring should be
+  updated to use this new value for "system".
+
+  The ``upload_docs`` command will continue to use the python.org
+  site, but the command is now deprecated. Users are urged to use
+  Read The Docs instead.
+
+* #776: Use EXT_SUFFIX for py_limited_api renaming.
+
+* #774 and #775: Use LegacyVersion from packaging when
+  detecting numpy versions.
+
+v26.1.1
+-------
+
+* Re-release of 26.1.0 with pytest pinned to allow for automated
+  deployment and thus proper packaging environment variables,
+  fixing issues with missing executable launchers.
+
+v26.1.0
+-------
+
+* #763: ``pkg_resources.get_default_cache`` now defers to the
+  `appdirs project <https://pypi.org/project/appdirs>`_ to
+  resolve the cache directory. Adds a vendored dependency on
+  appdirs to pkg_resources.
+
+v26.0.0
+-------
+
+* #748: By default, sdists are now produced in gzipped tarfile
+  format on all platforms, adding forward compatibility
+  for the same behavior in Python 3.6 (See Python #27819).
+
+* #459 via #736: On Windows with script launchers,
+  sys.argv[0] now reflects
+  the name of the entry point, consistent with the behavior in
+  distlib and pip wrappers.
+
+* #752 via #753: When indicating ``py_limited_api`` to Extension,
+  it must be passed as a keyword argument.
+
+v25.4.0
+-------
+
+* Add Extension(py_limited_api=True). When set to a truthy value,
+  that extension gets a filename appropriate for code using Py_LIMITED_API.
+  When used correctly this allows a single compiled extension to work on
+  all future versions of CPython 3.
+  The py_limited_api argument only controls the filename. To be
+  compatible with multiple versions of Python 3, the C extension
+  will also need to set -DPy_LIMITED_API=... and be modified to use
+  only the functions in the limited API.
+
+v25.3.0
+-------
+
+* #739 Fix unquoted libpaths by fixing compatibility between `numpy.distutils` and `distutils._msvccompiler` for numpy < 1.11.2 (Fix issue #728, error also fixed in Numpy).
+
+* #731: Bump certifi.
+
+* Style updates. See #740, #741, #743, #744, #742, #747.
+
+* #735: include license file.
+
+v25.2.0
+-------
+
+* #612 via #730: Add a LICENSE file which needs to be provided by the terms of
+  the MIT license.
+
+v25.1.6
+-------
+
+* #725: revert `library_dir_option` patch (Error is related to `numpy.distutils` and make errors on non Numpy users).
+
+v25.1.5
+-------
+
+* #720
+* #723: Improve patch for `library_dir_option`.
+
+v25.1.4
+-------
+
+* #717
+* #713
+* #707: Fix Python 2 compatibility for MSVC by catching errors properly.
+* #715: Fix unquoted libpaths by patching `library_dir_option`.
+
+v25.1.3
+-------
+
+* #714 and #704: Revert fix as it breaks other components
+  downstream that can't handle unicode. See #709, #710,
+  and #712.
+
+v25.1.2
+-------
+
+* #704: Fix errors when installing a zip sdist that contained
+  files named with non-ascii characters on Windows would
+  crash the install when it attempted to clean up the build.
+* #646: MSVC compatibility - catch errors properly in
+  RegistryInfo.lookup.
+* #702: Prevent UnboundLocalError when initial working_set
+  is empty.
+
+v25.1.1
+-------
+
+* #686: Fix issue in sys.path ordering by pkg_resources when
+  rewrite technique is "raw".
+* #699: Fix typo in msvc support.
+
+v25.1.0
+-------
+
+* #609: Setuptools will now try to download a distribution from
+  the next possible download location if the first download fails.
+  This means you can now specify multiple links as ``dependency_links``
+  and all links will be tried until a working download link is encountered.
+
+v25.0.2
+-------
+
+* #688: Fix AttributeError in setup.py when invoked not from
+  the current directory.
+
+v25.0.1
+-------
+
+* Cleanup of setup.py script.
+
+* Fixed documentation builders by allowing setup.py
+  to be imported without having bootstrapped the
+  metadata.
+
+* More style cleanup. See #677, #678, #679, #681, #685.
+
+v25.0.0
+-------
+
+* #674: Default ``sys.path`` manipulation by easy-install.pth
+  is now "raw", meaning that when writing easy-install.pth
+  during any install operation, the ``sys.path`` will not be
+  rewritten and will no longer give preference to easy_installed
+  packages.
+
+  To retain the old behavior when using any easy_install
+  operation (including ``setup.py install`` when setuptools is
+  present), set the environment variable:
+
+    SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite
+
+  This project hopes that few if any environments find it
+  necessary to retain the old behavior, and intends to drop
+  support for it altogether in a future release. Please report
+  any relevant concerns in the ticket for this change.
+
+v24.3.1
+-------
+
+* #398: Fix shebang handling on Windows in script
+  headers where spaces in ``sys.executable`` would
+  produce an improperly-formatted shebang header,
+  introduced in 12.0 with the fix for #188.
+
+* #663, #670: More style updates.
+
+v24.3.0
+-------
+
+* #516: Disable ``os.link`` to avoid hard linking
+  in ``sdist.make_distribution``, avoiding errors on
+  systems that support hard links but not on the
+  file system in which the build is occurring.
+
+v24.2.1
+-------
+
+* #667: Update Metadata-Version to 1.2 when
+  ``python_requires`` is supplied.
+
+v24.2.0
+-------
+
+* #631: Add support for ``python_requires`` keyword.
+
+v24.1.1
+-------
+
+* More style updates. See #660, #661, #641.
+
+v24.1.0
+-------
+
+* #659: ``setup.py`` now will fail fast and with a helpful
+  error message when the necessary metadata is missing.
+* More style updates. See #656, #635, #640,
+  #644, #650, #652, and #655.
+
+v24.0.3
+-------
+
+* Updated style in much of the codebase to match
+  community expectations. See #632, #633, #634,
+  #637, #639, #638, #642, #648.
+
+v24.0.2
+-------
+
+* If MSVC++14 is needed ``setuptools.msvc`` now redirect
+  user to Visual C++ Build Tools web page.
+
+v24.0.1
+-------
+
+* #625 and #626: Fixes on ``setuptools.msvc`` mainly
+  for Python 2 and Linux.
+
+v24.0.0
+-------
+
+* Pull Request #174: Add more aggressive support for
+  standalone Microsoft Visual C++ compilers in
+  msvc9compiler patch.
+  Particularly : Windows SDK 6.1 and 7.0
+  (MSVC++ 9.0), Windows SDK 7.1 (MSVC++ 10.0),
+  Visual C++ Build Tools 2015 (MSVC++14)
+* Renamed ``setuptools.msvc9_support`` to
+  ``setuptools.msvc``.
+
+v23.2.1
+-------
+
+Re-release of v23.2.0, which was missing the intended
+commits.
+
+* #623: Remove use of deprecated 'U' flag when reading
+  manifests.
+
+v23.1.0
+-------
+
+* #619: Deprecated ``tag_svn_revision`` distribution
+  option.
+
+v23.0.0
+-------
+
+* #611: Removed ARM executables for CLI and GUI script
+  launchers on Windows. If this was a feature you cared
+  about, please comment in the ticket.
+* #604: Removed docs building support. The project
+  now relies on documentation hosted at
+  https://setuptools.readthedocs.io/.
+
+v22.0.5
+-------
+
+* #604: Restore repository for upload_docs command
+  to restore publishing of docs during release.
+
+v22.0.4
+-------
+
+* #589: Upload releases to pypi.io using the upload
+  hostname and legacy path.
+
+v22.0.3
+-------
+
+* #589: Releases are now uploaded to pypi.io (Warehouse)
+  even when releases are made on Twine via Travis.
+
+v22.0.2
+-------
+
+* #589: Releases are now uploaded to pypi.io (Warehouse).
+
+v22.0.1
+-------
+
+* #190: On Python 2, if unicode is passed for packages to
+  ``build_py`` command, it will be handled just as with
+  text on Python 3.
+
+v22.0.0
+-------
+
+Intended to be v21.3.0, but jaraco accidentally released as
+a major bump.
+
+* #598: Setuptools now lists itself first in the User-Agent
+  for web requests, better following the guidelines in
+  `RFC 7231
+  <https://tools.ietf.org/html/rfc7231#section-5.5.3>`_.
+
+v21.2.2
+-------
+
+* Minor fixes to changelog and docs.
+
+v21.2.1
+-------
+
+* #261: Exclude directories when resolving globs in
+  package_data.
+
+v21.2.0
+-------
+
+* #539: In the easy_install get_site_dirs, honor all
+  paths found in ``site.getsitepackages``.
+
+v21.1.0
+-------
+
+* #572: In build_ext, now always import ``_CONFIG_VARS``
+  from ``distutils`` rather than from ``sysconfig``
+  to allow ``distutils.sysconfig.customize_compiler``
+  to configure the OS X compiler for ``-dynamiclib``.
+
+v21.0.0
+-------
+
+* Removed ez_setup.py from Setuptools sdist. The
+  bootstrap script will be maintained in its own
+  branch and should generally be retrieved from
+  its canonical location at
+  https://bootstrap.pypa.io/ez_setup.py.
+
+v20.10.0
+--------
+
+* #553: egg_info section is now generated in a
+  deterministic order, matching the order generated
+  by earlier versions of Python. Except on Python 2.6,
+  order is preserved when existing settings are present.
+* #556: Update to Packaging 16.7, restoring support
+  for deprecated ``python_implmentation`` marker.
+* #555: Upload command now prompts for a password
+  when uploading to PyPI (or other repository) if no
+  password is present in .pypirc or in the keyring.
+
+v20.9.0
+-------
+
+* #548: Update certifi version to 2016.2.28
+* #545: Safely handle deletion of non-zip eggs in rotate
+  command.
+
+v20.8.1
+-------
+
+* Issue #544: Fix issue with extra environment marker
+  processing in WorkingSet due to refactor in v20.7.0.
+
+v20.8.0
+-------
+
+* Issue #543: Re-release so that latest release doesn't
+  cause déjà vu with distribute and setuptools 0.7 in
+  older environments.
+
+v20.7.0
+-------
+
+* Refactored extra environment marker processing
+  in WorkingSet.
+* Issue #533: Fixed intermittent test failures.
+* Issue #536: In msvc9_support, trap additional exceptions
+  that might occur when importing
+  ``distutils.msvc9compiler`` in mingw environments.
+* Issue #537: Provide better context when package
+  metadata fails to decode in UTF-8.
+
+v20.6.8
+-------
+
+* Issue #523: Restored support for environment markers,
+  now honoring 'extra' environment markers.
+
+v20.6.7
+-------
+
+* Issue #523: Disabled support for environment markers
+  introduced in v20.5.
+
+v20.6.6
+-------
+
+* Issue #503: Restore support for PEP 345 environment
+  markers by updating to Packaging 16.6.
+
+v20.6.0
+-------
+
+* New release process that relies on
+  `bumpversion <https://github.com/peritus/bumpversion>`_
+  and Travis CI for continuous deployment.
+* Project versioning semantics now follow
+  `semver <https://semver.org>`_ precisely.
+  The 'v' prefix on version numbers now also allows
+  version numbers to be referenced in the changelog,
+  e.g. http://setuptools.readthedocs.io/en/latest/history.html#v20-6-0.
+
+20.5
+----
+
+* BB Pull Request #185, #470: Add support for environment markers
+  in requirements in install_requires, setup_requires,
+  tests_require as well as adding a test for the existing
+  extra_requires machinery.
+
+20.4
+----
+
+* Issue #422: Moved hosting to
+  `Github <https://github.com/pypa/setuptools>`_
+  from `Bitbucket <https://bitbucket.org/pypa/setuptools>`_.
+  Issues have been migrated, though all issues and comments
+  are attributed to bb-migration. So if you have a particular
+  issue or issues to which you've been subscribed, you will
+  want to "watch" the equivalent issue in Github.
+  The Bitbucket project will be retained for the indefinite
+  future, but Github now hosts the canonical project repository.
+
+20.3.1
+------
+
+* Issue #519: Remove import hook when reloading the
+  ``pkg_resources`` module.
+* BB Pull Request #184: Update documentation in ``pkg_resources``
+  around new ``Requirement`` implementation.
+
+20.3
+----
+
+* BB Pull Request #179: ``pkg_resources.Requirement`` objects are
+  now a subclass of ``packaging.requirements.Requirement``,
+  allowing any environment markers and url (if any) to be
+  affiliated with the requirement
+* BB Pull Request #179: Restore use of RequirementParseError
+  exception unintentionally dropped in 20.2.
+
+20.2.2
+------
+
+* Issue #502: Correct regression in parsing of multiple
+  version specifiers separated by commas and spaces.
+
+20.2.1
+------
+
+* Issue #499: Restore compatibility for legacy versions
+  by bumping to packaging 16.4.
+
+20.2
+----
+
+* Changelog now includes release dates and links to PEPs.
+* BB Pull Request #173: Replace dual PEP 345 _markerlib implementation
+  and PEP 426 implementation of environment marker support from
+  packaging 16.1 and PEP 508. Fixes Issue #122.
+  See also BB Pull Request #175, BB Pull Request #168, and
+  BB Pull Request #164. Additionally:
+
+   - ``Requirement.parse`` no longer retains the order of extras.
+   - ``parse_requirements`` now requires that all versions be
+     PEP-440 compliant, as revealed in #499. Packages released
+     with invalid local versions should be re-released using
+     the proper local version syntax, e.g. ``mypkg-1.0+myorg.1``.
+
+20.1.1
+------
+
+* Update ``upload_docs`` command to also honor keyring
+  for password resolution.
+
+20.1
+----
+
+* Added support for using passwords from keyring in the upload
+  command. See `the upload docs
+  <https://setuptools.readthedocs.io/en/latest/setuptools.html#upload-upload-source-and-or-egg-distributions-to-pypi>`_
+  for details.
+
+20.0
+----
+
+* Issue #118: Once again omit the package metadata (egg-info)
+  from the list of outputs in ``--record``. This version of setuptools
+  can no longer be used to upgrade pip earlier than 6.0.
+
+19.7
+----
+
+* `Off-project PR <https://github.com/jaraco/setuptools/pull/32>`_:
+  For FreeBSD, also honor root certificates from ca_root_nss.
+
+19.6.2
+------
+
+* Issue #491: Correct regression incurred in 19.4 where
+  a double-namespace package installed using pip would
+  cause a TypeError.
+
+19.6.1
+------
+
+* Restore compatibility for PyPy 3 compatibility lost in
+  19.4.1 addressing Issue #487.
+* ``setuptools.launch`` shim now loads scripts in a new
+  namespace, avoiding getting relative imports from
+  the setuptools package on Python 2.
+
+19.6
+----
+
+* Added a new entry script ``setuptools.launch``,
+  implementing the shim found in
+  ``pip.util.setuptools_build``. Use this command to launch
+  distutils-only packages under setuptools in the same way that
+  pip does, causing the setuptools monkeypatching of distutils
+  to be invoked prior to invoking a script. Useful for debugging
+  or otherwise installing a distutils-only package under
+  setuptools when pip isn't available or otherwise does not
+  expose the desired functionality. For example::
+
+    $ python -m setuptools.launch setup.py develop
+
+* Issue #488: Fix dual manifestation of Extension class in
+  extension packages installed as dependencies when Cython
+  is present.
+
+19.5
+----
+
+* Issue #486: Correct TypeError when getfilesystemencoding
+  returns None.
+* Issue #139: Clarified the license as MIT.
+* BB Pull Request #169: Removed special handling of command
+  spec in scripts for Jython.
+
+19.4.1
+------
+
+* Issue #487: Use direct invocation of ``importlib.machinery``
+  in ``pkg_resources`` to avoid missing detection on relevant
+  platforms.
+
+19.4
+----
+
+* Issue #341: Correct error in path handling of package data
+  files in ``build_py`` command when package is empty.
+* Distribute #323, Issue #141, Issue #207, and
+  BB Pull Request #167: Another implementation of
+  ``pkg_resources.WorkingSet`` and ``pkg_resources.Distribution``
+  that supports replacing an extant package with a new one,
+  allowing for setup_requires dependencies to supersede installed
+  packages for the session.
+
+19.3
+----
+
+* Issue #229: Implement new technique for readily incorporating
+  dependencies conditionally from vendored copies or primary
+  locations. Adds a new dependency on six.
+
+19.2
+----
+
+* BB Pull Request #163: Add get_command_list method to Distribution.
+* BB Pull Request #162: Add missing whitespace to multiline string
+  literals.
+
+19.1.1
+------
+
+* Issue #476: Cast version to string (using default encoding)
+  to avoid creating Unicode types on Python 2 clients.
+* Issue #477: In Powershell downloader, use explicit rendering
+  of strings, rather than rely on ``repr``, which can be
+  incorrect (especially on Python 2).
+
+19.1
+----
+
+* Issue #215: The bootstrap script ``ez_setup.py`` now
+  automatically detects
+  the latest version of setuptools (using PyPI JSON API) rather
+  than hard-coding a particular value.
+* Issue #475: Fix incorrect usage in _translate_metadata2.
+
+19.0
+----
+
+* Issue #442: Use RawConfigParser for parsing .pypirc file.
+  Interpolated values are no longer honored in .pypirc files.
+
+18.8.1
+------
+
+* Issue #440: Prevent infinite recursion when a SandboxViolation
+  or other UnpickleableException occurs in a sandbox context
+  with setuptools hidden. Fixes regression introduced in Setuptools
+  12.0.
+
+18.8
+----
+
+* Deprecated ``egg_info.get_pkg_info_revision``.
+* Issue #471: Don't rely on repr for an HTML attribute value in
+  package_index.
+* Issue #419: Avoid errors in FileMetadata when the metadata directory
+  is broken.
+* Issue #472: Remove deprecated use of 'U' in mode parameter
+  when opening files.
+
+18.7.1
+------
+
+* Issue #469: Refactored logic for Issue #419 fix to re-use metadata
+  loading from Provider.
+
+18.7
+----
+
+* Update dependency on certifi.
+* BB Pull Request #160: Improve detection of gui script in
+  ``easy_install._adjust_header``.
+* Made ``test.test_args`` a non-data property; alternate fix
+  for the issue reported in BB Pull Request #155.
+* Issue #453: In ``ez_setup`` bootstrap module, unload all
+  ``pkg_resources`` modules following download.
+* BB Pull Request #158: Honor PEP-488 when excluding
+  files for namespace packages.
+* Issue #419 and BB Pull Request #144: Add experimental support for
+  reading the version info from distutils-installed metadata rather
+  than using the version in the filename.
+
+18.6.1
+------
+
+* Issue #464: Correct regression in invocation of superclass on old-style
+  class on Python 2.
+
+18.6
+----
+
+* Issue #439: When installing entry_point scripts under development,
+  omit the version number of the package, allowing any version of the
+  package to be used.
+
+18.5
+----
+
+* In preparation for dropping support for Python 3.2, a warning is
+  now logged when pkg_resources is imported on Python 3.2 or earlier
+  Python 3 versions.
+* `Add support for python_platform_implementation environment marker
+  <https://github.com/jaraco/setuptools/pull/28>`_.
+* `Fix dictionary mutation during iteration
+  <https://github.com/jaraco/setuptools/pull/29>`_.
+
+18.4
+----
+
+* Issue #446: Test command now always invokes unittest, even
+  if no test suite is supplied.
+
+18.3.2
+------
+
+* Correct another regression in setuptools.findall
+  where the fix for Python #12885 was lost.
+
+18.3.1
+------
+
+* Issue #425: Correct regression in setuptools.findall.
+
+18.3
+----
+
+* BB Pull Request #135: Setuptools now allows disabling of
+  the manipulation of the sys.path
+  during the processing of the easy-install.pth file. To do so, set
+  the environment variable ``SETUPTOOLS_SYS_PATH_TECHNIQUE`` to
+  anything but "rewrite" (consider "raw"). During any install operation
+  with manipulation disabled, setuptools packages will be appended to
+  sys.path naturally.
+
+  Future versions may change the default behavior to disable
+  manipulation. If so, the default behavior can be retained by setting
+  the variable to "rewrite".
+
+* Issue #257: ``easy_install --version`` now shows more detail
+  about the installation location and Python version.
+
+* Refactor setuptools.findall in preparation for re-submission
+  back to distutils.
+
+18.2
+----
+
+* Issue #412: More efficient directory search in ``find_packages``.
+
+18.1
+----
+
+* Upgrade to vendored packaging 15.3.
+
+18.0.1
+------
+
+* Issue #401: Fix failure in test suite.
+
+18.0
+----
+
+* Dropped support for builds with Pyrex. Only Cython is supported.
+* Issue #288: Detect Cython later in the build process, after
+  ``setup_requires`` dependencies are resolved.
+  Projects backed by Cython can now be readily built
+  with a ``setup_requires`` dependency. For example::
+
+    ext = setuptools.Extension('mylib', ['src/CythonStuff.pyx', 'src/CStuff.c'])
+    setuptools.setup(
+        ...
+        ext_modules=[ext],
+        setup_requires=['cython'],
+    )
+
+  For compatibility with older versions of setuptools, packagers should
+  still include ``src/CythonMod.c`` in the source distributions or
+  require that Cython be present before building source distributions.
+  However, for systems with this build of setuptools, Cython will be
+  downloaded on demand.
+* Issue #396: Fixed test failure on OS X.
+* BB Pull Request #136: Remove excessive quoting from shebang headers
+  for Jython.
+
+17.1.1
+------
+
+* Backed out unintended changes to pkg_resources, restoring removal of
+  deprecated imp module (`ref
+  <https://bitbucket.org/pypa/setuptools/commits/f572ec9563d647fa8d4ffc534f2af8070ea07a8b#comment-1881283>`_).
+
+17.1
+----
+
+* Issue #380: Add support for range operators on environment
+  marker evaluation.
+
+17.0
+----
+
+* Issue #378: Do not use internal importlib._bootstrap module.
+* Issue #390: Disallow console scripts with path separators in
+  the name. Removes unintended functionality and brings behavior
+  into parity with pip.
+
+16.0
+----
+
+* BB Pull Request #130: Better error messages for errors in
+  parsed requirements.
+* BB Pull Request #133: Removed ``setuptools.tests`` from the
+  installed packages.
+* BB Pull Request #129: Address deprecation warning due to usage
+  of imp module.
+
+15.2
+----
+
+* Issue #373: Provisionally expose
+  ``pkg_resources._initialize_master_working_set``, allowing for
+  imperative re-initialization of the master working set.
+
+15.1
+----
+
+* Updated to Packaging 15.1 to address Packaging #28.
+* Fix ``setuptools.sandbox._execfile()`` with Python 3.1.
+
+15.0
+----
+
+* BB Pull Request #126: DistributionNotFound message now lists the package or
+  packages that required it. E.g.::
+
+      pkg_resources.DistributionNotFound: The 'colorama>=0.3.1' distribution was not found and is required by smlib.log.
+
+  Note that zc.buildout once depended on the string rendering of this
+  message to determine the package that was not found. This expectation
+  has since been changed, but older versions of buildout may experience
+  problems. See Buildout #242 for details.
+
+14.3.1
+------
+
+* Issue #307: Removed PEP-440 warning during parsing of versions
+  in ``pkg_resources.Distribution``.
+* Issue #364: Replace deprecated usage with recommended usage of
+  ``EntryPoint.load``.
+
+14.3
+----
+
+* Issue #254: When creating temporary egg cache on Unix, use mode 755
+  for creating the directory to avoid the subsequent warning if
+  the directory is group writable.
+
+14.2
+----
+
+* Issue #137: Update ``Distribution.hashcmp`` so that Distributions with
+  None for pyversion or platform can be compared against Distributions
+  defining those attributes.
+
+14.1.1
+------
+
+* Issue #360: Removed undesirable behavior from test runs, preventing
+  write tests and installation to system site packages.
+
+14.1
+----
+
+* BB Pull Request #125: Add ``__ne__`` to Requirement class.
+* Various refactoring of easy_install.
+
+14.0
+----
+
+* Bootstrap script now accepts ``--to-dir`` to customize save directory or
+  allow for re-use of existing repository of setuptools versions. See
+  BB Pull Request #112 for background.
+* Issue #285: ``easy_install`` no longer will default to installing
+  packages to the "user site packages" directory if it is itself installed
+  there. Instead, the user must pass ``--user`` in all cases to install
+  packages to the user site packages.
+  This behavior now matches that of "pip install". To configure
+  an environment to always install to the user site packages, consider
+  using the "install-dir" and "scripts-dir" parameters to easy_install
+  through an appropriate distutils config file.
+
+13.0.2
+------
+
+* Issue #359: Include pytest.ini in the sdist so invocation of py.test on the
+  sdist honors the pytest configuration.
+
+13.0.1
+------
+
+Re-release of 13.0. Intermittent connectivity issues caused the release
+process to fail and PyPI uploads no longer accept files for 13.0.
+
+13.0
+----
+
+* Issue #356: Back out BB Pull Request #119 as it requires Setuptools 10 or later
+  as the source during an upgrade.
+* Removed build_py class from setup.py. According to 892f439d216e, this
+  functionality was added to support upgrades from old Distribute versions,
+  0.6.5 and 0.6.6.
+
+12.4
+----
+
+* BB Pull Request #119: Restore writing of ``setup_requires`` to metadata
+  (previously added in 8.4 and removed in 9.0).
+
+12.3
+----
+
+* Documentation is now linked using the rst.linker package.
+* Fix ``setuptools.command.easy_install.extract_wininst_cfg()``
+  with Python 2.6 and 2.7.
+* Issue #354. Added documentation on building setuptools
+  documentation.
+
+12.2
+----
+
+* Issue #345: Unload all modules under pkg_resources during
+  ``ez_setup.use_setuptools()``.
+* Issue #336: Removed deprecation from ``ez_setup.use_setuptools``,
+  as it is clearly still used by buildout's bootstrap. ``ez_setup``
+  remains deprecated for use by individual packages.
+* Simplified implementation of ``ez_setup.use_setuptools``.
+
+12.1
+----
+
+* BB Pull Request #118: Soften warning for non-normalized versions in
+  Distribution.
+
+12.0.5
+------
+
+* Issue #339: Correct Attribute reference in ``cant_write_to_target``.
+* Issue #336: Deprecated ``ez_setup.use_setuptools``.
+
+12.0.4
+------
+
+* Issue #335: Fix script header generation on Windows.
+
+12.0.3
+------
+
+* Fixed incorrect class attribute in ``install_scripts``. Tests would be nice.
+
+12.0.2
+------
+
+* Issue #331: Fixed ``install_scripts`` command on Windows systems corrupting
+  the header.
+
+12.0.1
+------
+
+* Restore ``setuptools.command.easy_install.sys_executable`` for pbr
+  compatibility. For the future, tools should construct a CommandSpec
+  explicitly.
+
+12.0
+----
+
+* Issue #188: Setuptools now support multiple entities in the value for
+  ``build.executable``, such that an executable of "/usr/bin/env my-python" may
+  be specified. This means that systems with a specified executable whose name
+  has spaces in the path must be updated to escape or quote that value.
+* Deprecated ``easy_install.ScriptWriter.get_writer``, replaced by ``.best()``
+  with slightly different semantics (no force_windows flag).
+
+11.3.1
+------
+
+* Issue #327: Formalize and restore support for any printable character in an
+  entry point name.
+
+11.3
+----
+
+* Expose ``EntryPoint.resolve`` in place of EntryPoint._load, implementing the
+  simple, non-requiring load. Deprecated all uses of ``EntryPoint._load``
+  except for calling with no parameters, which is just a shortcut for
+  ``ep.require(); ep.resolve();``.
+
+  Apps currently invoking ``ep.load(require=False)`` should instead do the
+  following if wanting to avoid the deprecation warning::
+
+    getattr(ep, "resolve", lambda: ep.load(require=False))()
+
+11.2
+----
+
+* Pip #2326: Report deprecation warning at stacklevel 2 for easier diagnosis.
+
+11.1
+----
+
+* Issue #281: Since Setuptools 6.1 (Issue #268), a ValueError would be raised
+  in certain cases where VersionConflict was raised with two arguments, which
+  occurred in ``pkg_resources.WorkingSet.find``. This release adds support
+  for indicating the dependent packages while maintaining support for
+  a VersionConflict when no dependent package context is known. New unit tests
+  now capture the expected interface.
+
+11.0
+----
+
+* Interop #3: Upgrade to Packaging 15.0; updates to PEP 440 so that >1.7 does
+  not exclude 1.7.1 but does exclude 1.7.0 and 1.7.0.post1.
+
+10.2.1
+------
+
+* Issue #323: Fix regression in entry point name parsing.
+
+10.2
+----
+
+* Deprecated use of EntryPoint.load(require=False). Passing a boolean to a
+  function to select behavior is an anti-pattern. Instead use
+  ``EntryPoint._load()``.
+* Substantial refactoring of all unit tests. Tests are now much leaner and
+  re-use a lot of fixtures and contexts for better clarity of purpose.
+
+10.1
+----
+
+* Issue #320: Added a compatibility implementation of
+  ``sdist._default_revctrl``
+  so that systems relying on that interface do not fail (namely, Ubuntu 12.04
+  and similar Debian releases).
+
+10.0.1
+------
+
+* Issue #319: Fixed issue installing pure distutils packages.
+
+10.0
+----
+
+* Issue #313: Removed built-in support for subversion. Projects wishing to
+  retain support for subversion will need to use a third party library. The
+  extant implementation is being ported to `setuptools_svn
+  <https://pypi.org/project/setuptools_svn/>`_.
+* Issue #315: Updated setuptools to hide its own loaded modules during
+  installation of another package. This change will enable setuptools to
+  upgrade (or downgrade) itself even when its own metadata and implementation
+  change.
+
+9.1
+---
+
+* Prefer vendored packaging library `as recommended
+  <https://github.com/jaraco/setuptools/commit/170657b68f4b92e7e1bf82f5e19a831f5744af67#commitcomment-9109448>`_.
+
+9.0.1
+-----
+
+* Issue #312: Restored presence of pkg_resources API tests (doctest) to sdist.
+
+9.0
+---
+
+* Issue #314: Disabled support for ``setup_requires`` metadata to avoid issue
+  where Setuptools was unable to upgrade over earlier versions.
+
+8.4
+---
+
+* BB Pull Request #106: Now write ``setup_requires`` metadata.
+
+8.3
+---
+
+* Issue #311: Decoupled pkg_resources from setuptools once again.
+  ``pkg_resources`` is now a package instead of a module.
+
+8.2.1
+-----
+
+* Issue #306: Suppress warnings about Version format except in select scenarios
+  (such as installation).
+
+8.2
+---
+
+* BB Pull Request #85: Search egg-base when adding egg-info to manifest.
+
+8.1
+---
+
+* Upgrade ``packaging`` to 14.5, giving preference to "rc" as designator for
+  release candidates over "c".
+* PEP-440 warnings are now raised as their own class,
+  ``pkg_resources.PEP440Warning``, instead of RuntimeWarning.
+* Disabled warnings on empty versions.
+
+8.0.4
+-----
+
+* Upgrade ``packaging`` to 14.4, fixing an error where there is a
+  different result for whether 2.0.5 is contained within >2.0dev and >2.0.dev even
+  though normalization rules should have made them equal.
+* Issue #296: Add warning when a version is parsed as legacy. This warning will
+  make it easier for developers to recognize deprecated version numbers.
+
+8.0.3
+-----
+
+* Issue #296: Restored support for ``__hash__`` on parse_version results.
+
+8.0.2
+-----
+
+* Issue #296: Restored support for ``__getitem__`` and sort operations on
+  parse_version result.
+
+8.0.1
+-----
+
+* Issue #296: Restore support for iteration over parse_version result, but
+  deprecated that usage with a warning. Fixes failure with buildout.
+
+8.0
+---
+
+* Implement PEP 440 within
+  pkg_resources and setuptools. This change
+  deprecates some version numbers such that they will no longer be installable
+  without using the ``===`` escape hatch. See `the changes to test_resources
+  <https://bitbucket.org/pypa/setuptools/commits/dcd552da643c4448056de84c73d56da6d70769d5#chg-setuptools/tests/test_resources.py>`_
+  for specific examples of version numbers and specifiers that are no longer
+  supported. Setuptools now "vendors" the `packaging
+  <https://github.com/pypa/packaging>`_ library.
+
+7.0
+---
+
+* Issue #80, Issue #209: Eggs that are downloaded for ``setup_requires``,
+  ``test_requires``, etc. are now placed in a ``./.eggs`` directory instead of
+  directly in the current directory. This choice of location means the files
+  can be readily managed (removed, ignored). Additionally,
+  later phases or invocations of setuptools will not detect the package as
+  already installed and ignore it for permanent install (See #209).
+
+  This change is indicated as backward-incompatible as installations that
+  depend on the installation in the current directory will need to account for
+  the new location. Systems that ignore ``*.egg`` will probably need to be
+  adapted to ignore ``.eggs``. The files will need to be manually moved or
+  will be retrieved again. Most use cases will require no attention.
+
+6.1
+---
+
+* Issue #268: When resolving package versions, a VersionConflict now reports
+  which package previously required the conflicting version.
+
+6.0.2
+-----
+
+* Issue #262: Fixed regression in pip install due to egg-info directories
+  being omitted. Re-opens Issue #118.
+
+6.0.1
+-----
+
+* Issue #259: Fixed regression with namespace package handling on ``single
+  version, externally managed`` installs.
+
+6.0
+---
+
+* Issue #100: When building a distribution, Setuptools will no longer match
+  default files using platform-dependent case sensitivity, but rather will
+  only match the files if their case matches exactly. As a result, on Windows
+  and other case-insensitive file systems, files with names such as
+  'readme.txt' or 'README.TXT' will be omitted from the distribution and a
+  warning will be issued indicating that 'README.txt' was not found. Other
+  filenames affected are:
+
+    - README.rst
+    - README
+    - setup.cfg
+    - setup.py (or the script name)
+    - test/test*.py
+
+  Any users producing distributions with filenames that match those above
+  case-insensitively, but not case-sensitively, should rename those files in
+  their repository for better portability.
+* BB Pull Request #72: When using ``single_version_externally_managed``, the
+  exclusion list now includes Python 3.2 ``__pycache__`` entries.
+* BB Pull Request #76 and BB Pull Request #78: lines in top_level.txt are now
+  ordered deterministically.
+* Issue #118: The egg-info directory is now no longer included in the list
+  of outputs.
+* Issue #258: Setuptools now patches distutils msvc9compiler to
+  recognize the specially-packaged compiler package for easy extension module
+  support on Python 2.6, 2.7, and 3.2.
+
+5.8
+---
+
+* Issue #237: ``pkg_resources`` now uses explicit detection of Python 2 vs.
+  Python 3, supporting environments where builtins have been patched to make
+  Python 3 look more like Python 2.
+
+5.7
+---
+
+* Issue #240: Based on real-world performance measures against 5.4, zip
+  manifests are now cached in all circumstances. The
+  ``PKG_RESOURCES_CACHE_ZIP_MANIFESTS`` environment variable is no longer
+  relevant. The observed "memory increase" referenced in the 5.4 release
+  notes and detailed in Issue #154 was likely not an increase over the status
+  quo, but rather only an increase over not storing the zip info at all.
+
+5.6
+---
+
+* Issue #242: Use absolute imports in svn_utils to avoid issues if the
+  installing package adds an xml module to the path.
+
+5.5.1
+-----
+
+* Issue #239: Fix typo in 5.5 such that fix did not take.
+
+5.5
+---
+
+* Issue #239: Setuptools now includes the setup_requires directive on
+  Distribution objects and validates the syntax just like install_requires
+  and tests_require directives.
+
+5.4.2
+-----
+
+* Issue #236: Corrected regression in execfile implementation for Python 2.6.
+
+5.4.1
+-----
+
+* Python #7776: (ssl_support) Correct usage of host for validation when
+  tunneling for HTTPS.
+
+5.4
+---
+
+* Issue #154: ``pkg_resources`` will now cache the zip manifests rather than
+  re-processing the same file from disk multiple times, but only if the
+  environment variable ``PKG_RESOURCES_CACHE_ZIP_MANIFESTS`` is set. Clients
+  that package many modules in the same zip file will see some improvement
+  in startup time by enabling this feature. This feature is not enabled by
+  default because it causes a substantial increase in memory usage.
+
+5.3
+---
+
+* Issue #185: Make svn tagging work on the new style SVN metadata.
+  Thanks cazabon!
+* Prune revision control directories (e.g .svn) from base path
+  as well as sub-directories.
+
+5.2
+---
+
+* Added a `Developer Guide
+  <https://setuptools.readthedocs.io/en/latest/developer-guide.html>`_ to the official
+  documentation.
+* Some code refactoring and cleanup was done with no intended behavioral
+  changes.
+* During install_egg_info, the generated lines for namespace package .pth
+  files are now processed even during a dry run.
+
+5.1
+---
+
+* Issue #202: Implemented more robust cache invalidation for the ZipImporter,
+  building on the work in Issue #168. Special thanks to Jurko Gospodnetic and
+  PJE.
+
+5.0.2
+-----
+
+* Issue #220: Restored script templates.
+
+5.0.1
+-----
+
+* Renamed script templates to end with .tmpl now that they no longer need
+  to be processed by 2to3. Fixes spurious syntax errors during build/install.
+
+5.0
+---
+
+* Issue #218: Re-release of 3.8.1 to signal that it supersedes 4.x.
+* Incidentally, script templates were updated not to include the triple-quote
+  escaping.
+
+3.7.1 and 3.8.1 and 4.0.1
+-------------------------
+
+* Issue #213: Use legacy StringIO behavior for compatibility under pbr.
+* Issue #218: Setuptools 3.8.1 superseded 4.0.1, and 4.x was removed
+  from the available versions to install.
+
+4.0
+---
+
+* Issue #210: ``setup.py develop`` now copies scripts in binary mode rather
+  than text mode, matching the behavior of the ``install`` command.
+
+3.8
+---
+
+* Extend Issue #197 workaround to include all Python 3 versions prior to
+  3.2.2.
+
+3.7
+---
+
+* Issue #193: Improved handling of Unicode filenames when building manifests.
+
+3.6
+---
+
+* Issue #203: Honor proxy settings for Powershell downloader in the bootstrap
+  routine.
+
+3.5.2
+-----
+
+* Issue #168: More robust handling of replaced zip files and stale caches.
+  Fixes ZipImportError complaining about a 'bad local header'.
+
+3.5.1
+-----
+
+* Issue #199: Restored ``install._install`` for compatibility with earlier
+  NumPy versions.
+
+3.5
+---
+
+* Issue #195: Follow symbolic links in find_packages (restoring behavior
+  broken in 3.4).
+* Issue #197: On Python 3.1, PKG-INFO is now saved in a UTF-8 encoding instead
+  of ``sys.getpreferredencoding`` to match the behavior on Python 2.6-3.4.
+* Issue #192: Preferred bootstrap location is now
+  https://bootstrap.pypa.io/ez_setup.py (mirrored from former location).
+
+3.4.4
+-----
+
+* Issue #184: Correct failure where find_package over-matched packages
+  when directory traversal isn't short-circuited.
+
+3.4.3
+-----
+
+* Issue #183: Really fix test command with Python 3.1.
+
+3.4.2
+-----
+
+* Issue #183: Fix additional regression in test command on Python 3.1.
+
+3.4.1
+-----
+
+* Issue #180: Fix regression in test command not caught by py.test-run tests.
+
+3.4
+---
+
+* Issue #176: Add parameter to the test command to support a custom test
+  runner: --test-runner or -r.
+* Issue #177: Now assume most common invocation to install command on
+  platforms/environments without stack support (issuing a warning). Setuptools
+  now installs naturally on IronPython. Behavior on CPython should be
+  unchanged.
+
+3.3
+---
+
+* Add ``include`` parameter to ``setuptools.find_packages()``.
+
+3.2
+---
+
+* BB Pull Request #39: Add support for C++ targets from Cython ``.pyx`` files.
+* Issue #162: Update dependency on certifi to 1.0.1.
+* Issue #164: Update dependency on wincertstore to 0.2.
+
+3.1
+---
+
+* Issue #161: Restore Features functionality to allow backward compatibility
+  (for Features) until the uses of that functionality are sufficiently removed.
+
+3.0.2
+-----
+
+* Correct typo in previous bugfix.
+
+3.0.1
+-----
+
+* Issue #157: Restore support for Python 2.6 in bootstrap script where
+  ``zipfile.ZipFile`` does not yet have support for context managers.
+
+3.0
+---
+
+* Issue #125: Prevent Subversion support from creating a ~/.subversion
+  directory just for checking the presence of a Subversion repository.
+* Issue #12: Namespace packages are now imported lazily. That is, the mere
+  declaration of a namespace package in an egg on ``sys.path`` no longer
+  causes it to be imported when ``pkg_resources`` is imported. Note that this
+  change means that all of a namespace package's ``__init__.py`` files must
+  include a ``declare_namespace()`` call in order to ensure that they will be
+  handled properly at runtime. In 2.x it was possible to get away without
+  including the declaration, but only at the cost of forcing namespace
+  packages to be imported early, which 3.0 no longer does.
+* Issue #148: When building (bdist_egg), setuptools no longer adds
+  ``__init__.py`` files to namespace packages. Any packages that rely on this
+  behavior will need to create ``__init__.py`` files and include the
+  ``declare_namespace()``.
+* Issue #7: Setuptools itself is now distributed as a zip archive in addition to
+  tar archive. ez_setup.py now uses zip archive. This approach avoids the potential
+  security vulnerabilities presented by use of tar archives in ez_setup.py.
+  It also leverages the security features added to ZipFile.extract in Python 2.7.4.
+* Issue #65: Removed deprecated Features functionality.
+* BB Pull Request #28: Remove backport of ``_bytecode_filenames`` which is
+  available in Python 2.6 and later, but also has better compatibility with
+  Python 3 environments.
+* Issue #156: Fix spelling of __PYVENV_LAUNCHER__ variable.
+
+2.2
+---
+
+* Issue #141: Restored fix for allowing setup_requires dependencies to
+  override installed dependencies during setup.
+* Issue #128: Fixed issue where only the first dependency link was honored
+  in a distribution where multiple dependency links were supplied.
+
+2.1.2
+-----
+
+* Issue #144: Read long_description using codecs module to avoid errors
+  installing on systems where LANG=C.
+
+2.1.1
+-----
+
+* Issue #139: Fix regression in re_finder for CVS repos (and maybe Git repos
+  as well).
+
+2.1
+---
+
+* Issue #129: Suppress inspection of ``*.whl`` files when searching for files
+  in a zip-imported file.
+* Issue #131: Fix RuntimeError when constructing an egg fetcher.
+
+2.0.2
+-----
+
+* Fix NameError during installation with Python implementations (e.g. Jython)
+  not containing parser module.
+* Fix NameError in ``sdist:re_finder``.
+
+2.0.1
+-----
+
+* Issue #124: Fixed error in list detection in upload_docs.
+
+2.0
+---
+
+* Issue #121: Exempt lib2to3 pickled grammars from DirectorySandbox.
+* Issue #41: Dropped support for Python 2.4 and Python 2.5. Clients requiring
+  setuptools for those versions of Python should use setuptools 1.x.
+* Removed ``setuptools.command.easy_install.HAS_USER_SITE``. Clients
+  expecting this boolean variable should use ``site.ENABLE_USER_SITE``
+  instead.
+* Removed ``pkg_resources.ImpWrapper``. Clients that expected this class
+  should use ``pkgutil.ImpImporter`` instead.
+
+1.4.2
+-----
+
+* Issue #116: Correct TypeError when reading a local package index on Python
+  3.
+
+1.4.1
+-----
+
+* Issue #114: Use ``sys.getfilesystemencoding`` for decoding config in
+  ``bdist_wininst`` distributions.
+
+* Issue #105 and Issue #113: Establish a more robust technique for
+  determining the terminal encoding::
+
+    1. Try ``getpreferredencoding``
+    2. If that returns US_ASCII or None, try the encoding from
+       ``getdefaultlocale``. If that encoding was a "fallback" because Python
+       could not figure it out from the environment or OS, encoding remains
+       unresolved.
+    3. If the encoding is resolved, then make sure Python actually implements
+       the encoding.
+    4. On the event of an error or unknown codec, revert to fallbacks
+       (UTF-8 on Darwin, ASCII on everything else).
+    5. If the encoding is 'mac-roman' on Darwin, use UTF-8 as 'mac-roman' was
+       a bug on older Python releases.
+
+    On a side note, it would seem that the encoding only matters for when SVN
+    does not yet support ``--xml`` and when getting repository and svn version
+    numbers. The ``--xml`` technique should yield UTF-8 according to some
+    messages on the SVN mailing lists. So if the version numbers are always
+    7-bit ASCII clean, it may be best to only support the file parsing methods
+    for legacy SVN releases and support for SVN without the subprocess command
+    would simply go away as support for the older SVNs does.
+
+1.4
+---
+
+* Issue #27: ``easy_install`` will now use credentials from .pypirc if
+  present for connecting to the package index.
+* BB Pull Request #21: Omit unwanted newlines in ``package_index._encode_auth``
+  when the username/password pair length indicates wrapping.
+
+1.3.2
+-----
+
+* Issue #99: Fix filename encoding issues in SVN support.
+
+1.3.1
+-----
+
+* Remove exuberant warning in SVN support when SVN is not used.
+
+1.3
+---
+
+* Address security vulnerability in SSL match_hostname check as reported in
+  Python #17997.
+* Prefer `backports.ssl_match_hostname
+  <https://pypi.org/project/backports.ssl_match_hostname/>`_ for backport
+  implementation if present.
+* Correct NameError in ``ssl_support`` module (``socket.error``).
+
+1.2
+---
+
+* Issue #26: Add support for SVN 1.7. Special thanks to Philip Thiem for the
+  contribution.
+* Issue #93: Wheels are now distributed with every release. Note that as
+  reported in Issue #108, as of Pip 1.4, scripts aren't installed properly
+  from wheels. Therefore, if using Pip to install setuptools from a wheel,
+  the ``easy_install`` command will not be available.
+* Setuptools "natural" launcher support, introduced in 1.0, is now officially
+  supported.
+
+1.1.7
+-----
+
+* Fixed behavior of NameError handling in 'script template (dev).py' (script
+  launcher for 'develop' installs).
+* ``ez_setup.py`` now ensures partial downloads are cleaned up following
+  a failed download.
+* Distribute #363 and Issue #55: Skip an sdist test that fails on locales
+  other than UTF-8.
+
+1.1.6
+-----
+
+* Distribute #349: ``sandbox.execfile`` now opens the target file in binary
+  mode, thus honoring a BOM in the file when compiled.
+
+1.1.5
+-----
+
+* Issue #69: Second attempt at fix (logic was reversed).
+
+1.1.4
+-----
+
+* Issue #77: Fix error in upload command (Python 2.4).
+
+1.1.3
+-----
+
+* Fix NameError in previous patch.
+
+1.1.2
+-----
+
+* Issue #69: Correct issue where 404 errors are returned for URLs with
+  fragments in them (such as #egg=).
+
+1.1.1
+-----
+
+* Issue #75: Add ``--insecure`` option to ez_setup.py to accommodate
+  environments where a trusted SSL connection cannot be validated.
+* Issue #76: Fix AttributeError in upload command with Python 2.4.
+
+1.1
+---
+
+* Issue #71 (Distribute #333): EasyInstall now puts less emphasis on the
+  condition when a host is blocked via ``--allow-hosts``.
+* Issue #72: Restored Python 2.4 compatibility in ``ez_setup.py``.
+
+1.0
+---
+
+* Issue #60: On Windows, Setuptools supports deferring to another launcher,
+  such as Vinay Sajip's `pylauncher <https://bitbucket.org/pypa/pylauncher>`_
+  (included with Python 3.3) to launch console and GUI scripts and not install
+  its own launcher executables. This experimental functionality is currently
+  only enabled if  the ``SETUPTOOLS_LAUNCHER`` environment variable is set to
+  "natural". In the future, this behavior may become default, but only after
+  it has matured and seen substantial adoption. The ``SETUPTOOLS_LAUNCHER``
+  also accepts "executable" to force the default behavior of creating launcher
+  executables.
+* Issue #63: Bootstrap script (ez_setup.py) now prefers Powershell, curl, or
+  wget for retrieving the Setuptools tarball for improved security of the
+  install. The script will still fall back to a simple ``urlopen`` on
+  platforms that do not have these tools.
+* Issue #65: Deprecated the ``Features`` functionality.
+* Issue #52: In ``VerifyingHTTPSConn``, handle a tunnelled (proxied)
+  connection.
+
+Backward-Incompatible Changes
+=============================
+
+This release includes a couple of backward-incompatible changes, but most if
+not all users will find 1.0 a drop-in replacement for 0.9.
+
+* Issue #50: Normalized API of environment marker support. Specifically,
+  removed line number and filename from SyntaxErrors when returned from
+  `pkg_resources.invalid_marker`. Any clients depending on the specific
+  string representation of exceptions returned by that function may need to
+  be updated to account for this change.
+* Issue #50: SyntaxErrors generated by `pkg_resources.invalid_marker` are
+  normalized for cross-implementation consistency.
+* Removed ``--ignore-conflicts-at-my-risk`` and ``--delete-conflicting``
+  options to easy_install. These options have been deprecated since 0.6a11.
+
+0.9.8
+-----
+
+* Issue #53: Fix NameErrors in `_vcs_split_rev_from_url`.
+
+0.9.7
+-----
+
+* Issue #49: Correct AttributeError on PyPy where a hashlib.HASH object does
+  not have a `.name` attribute.
+* Issue #34: Documentation now refers to bootstrap script in code repository
+  referenced by bookmark.
+* Add underscore-separated keys to environment markers (markerlib).
+
+0.9.6
+-----
+
+* Issue #44: Test failure on Python 2.4 when MD5 hash doesn't have a `.name`
+  attribute.
+
+0.9.5
+-----
+
+* Python #17980: Fix security vulnerability in SSL certificate validation.
+
+0.9.4
+-----
+
+* Issue #43: Fix issue (introduced in 0.9.1) with version resolution when
+  upgrading over other releases of Setuptools.
+
+0.9.3
+-----
+
+* Issue #42: Fix new ``AttributeError`` introduced in last fix.
+
+0.9.2
+-----
+
+* Issue #42: Fix regression where blank checksums would trigger an
+  ``AttributeError``.
+
+0.9.1
+-----
+
+* Distribute #386: Allow other positional and keyword arguments to os.open.
+* Corrected dependency on certifi mis-referenced in 0.9.
+
+0.9
+---
+
+* `package_index` now validates hashes other than MD5 in download links.
+
+0.8
+---
+
+* Code base now runs on Python 2.4 - Python 3.3 without Python 2to3
+  conversion.
+
+0.7.8
+-----
+
+* Distribute #375: Yet another fix for yet another regression.
+
+0.7.7
+-----
+
+* Distribute #375: Repair AttributeError created in last release (redo).
+* Issue #30: Added test for get_cache_path.
+
+0.7.6
+-----
+
+* Distribute #375: Repair AttributeError created in last release.
+
+0.7.5
+-----
+
+* Issue #21: Restore Python 2.4 compatibility in ``test_easy_install``.
+* Distribute #375: Merged additional warning from Distribute 0.6.46.
+* Now honor the environment variable
+  ``SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT`` in addition to the now
+  deprecated ``DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT``.
+
+0.7.4
+-----
+
+* Issue #20: Fix comparison of parsed SVN version on Python 3.
+
+0.7.3
+-----
+
+* Issue #1: Disable installation of Windows-specific files on non-Windows systems.
+* Use new sysconfig module with Python 2.7 or >=3.2.
+
+0.7.2
+-----
+
+* Issue #14: Use markerlib when the `parser` module is not available.
+* Issue #10: ``ez_setup.py`` now uses HTTPS to download setuptools from PyPI.
+
+0.7.1
+-----
+
+* Fix NameError (Issue #3) again - broken in bad merge.
+
+0.7
+---
+
+* Merged Setuptools and Distribute. See docs/merge.txt for details.
+
+Added several features that were slated for setuptools 0.6c12:
+
+* Index URL now defaults to HTTPS.
+* Added experimental environment marker support. Now clients may designate a
+  PEP-426 environment marker for "extra" dependencies. Setuptools uses this
+  feature in ``setup.py`` for optional SSL and certificate validation support
+  on older platforms. Based on Distutils-SIG discussions, the syntax is
+  somewhat tentative. There should probably be a PEP with a firmer spec before
+  the feature should be considered suitable for use.
+* Added support for SSL certificate validation when installing packages from
+  an HTTPS service.
+
+0.7b4
+-----
+
+* Issue #3: Fixed NameError in SSL support.
+
+0.6.49
+------
+
+* Move warning check in ``get_cache_path`` to follow the directory creation
+  to avoid errors when the cache path does not yet exist. Fixes the error
+  reported in Distribute #375.
+
+0.6.48
+------
+
+* Correct AttributeError in ``ResourceManager.get_cache_path`` introduced in
+  0.6.46 (redo).
+
+0.6.47
+------
+
+* Correct AttributeError in ``ResourceManager.get_cache_path`` introduced in
+  0.6.46.
+
+0.6.46
+------
+
+* Distribute #375: Issue a warning if the PYTHON_EGG_CACHE or otherwise
+  customized egg cache location specifies a directory that's group- or
+  world-writable.
+
+0.6.45
+------
+
+* Distribute #379: ``distribute_setup.py`` now traps VersionConflict as well,
+  restoring ability to upgrade from an older setuptools version.
+
+0.6.44
+------
+
+* ``distribute_setup.py`` has been updated to allow Setuptools 0.7 to
+  satisfy use_setuptools.
+
+0.6.43
+------
+
+* Distribute #378: Restore support for Python 2.4 Syntax (regression in 0.6.42).
+
+0.6.42
+------
+
+* External links finder no longer yields duplicate links.
+* Distribute #337: Moved site.py to setuptools/site-patch.py (graft of very old
+  patch from setuptools trunk which inspired PR #31).
+
+0.6.41
+------
+
+* Distribute #27: Use public api for loading resources from zip files rather than
+  the private method `_zip_directory_cache`.
+* Added a new function ``easy_install.get_win_launcher`` which may be used by
+  third-party libraries such as buildout to get a suitable script launcher.
+
+0.6.40
+------
+
+* Distribute #376: brought back cli.exe and gui.exe that were deleted in the
+  previous release.
+
+0.6.39
+------
+
+* Add support for console launchers on ARM platforms.
+* Fix possible issue in GUI launchers where the subsystem was not supplied to
+  the linker.
+* Launcher build script now refactored for robustness.
+* Distribute #375: Resources extracted from a zip egg to the file system now also
+  check the contents of the file against the zip contents during each
+  invocation of get_resource_filename.
+
+0.6.38
+------
+
+* Distribute #371: The launcher manifest file is now installed properly.
+
+0.6.37
+------
+
+* Distribute #143: Launcher scripts, including easy_install itself, are now
+  accompanied by a manifest on 32-bit Windows environments to avoid the
+  Installer Detection Technology and thus undesirable UAC elevation described
+  in `this Microsoft article
+  <http://technet.microsoft.com/en-us/library/cc709628%28WS.10%29.aspx>`_.
+
+0.6.36
+------
+
+* BB Pull Request #35: In Buildout #64, it was reported that
+  under Python 3, installation of distutils scripts could attempt to copy
+  the ``__pycache__`` directory as a file, causing an error, apparently only
+  under Windows. Easy_install now skips all directories when processing
+  metadata scripts.
+
+0.6.35
+------
+
+
+Note this release is backward-incompatible with distribute 0.6.23-0.6.34 in
+how it parses version numbers.
+
+* Distribute #278: Restored compatibility with distribute 0.6.22 and setuptools
+  0.6. Updated the documentation to match more closely with the version
+  parsing as intended in setuptools 0.6.
+
+0.6.34
+------
+
+* Distribute #341: 0.6.33 fails to build under Python 2.4.
+
+0.6.33
+------
+
+* Fix 2 errors with Jython 2.5.
+* Fix 1 failure with Jython 2.5 and 2.7.
+* Disable workaround for Jython scripts on Linux systems.
+* Distribute #336: `setup.py` no longer masks failure exit code when tests fail.
+* Fix issue in pkg_resources where try/except around a platform-dependent
+  import would trigger hook load failures on Mercurial. See pull request 32
+  for details.
+* Distribute #341: Fix a ResourceWarning.
+
+0.6.32
+------
+
+* Fix test suite with Python 2.6.
+* Fix some DeprecationWarnings and ResourceWarnings.
+* Distribute #335: Backed out `setup_requires` superseding installed requirements
+  until regression can be addressed.
+
+0.6.31
+------
+
+* Distribute #303: Make sure the manifest only ever contains UTF-8 in Python 3.
+* Distribute #329: Properly close files created by tests for compatibility with
+  Jython.
+* Work around Jython #1980 and Jython #1981.
+* Distribute #334: Provide workaround for packages that reference `sys.__stdout__`
+  such as numpy does. This change should address
+  `virtualenv #359 <https://github.com/pypa/virtualenv/issues/359>`_ as long
+  as the system encoding is UTF-8 or the IO encoding is specified in the
+  environment, i.e.::
+
+     PYTHONIOENCODING=utf8 pip install numpy
+
+* Fix for encoding issue when installing from Windows executable on Python 3.
+* Distribute #323: Allow `setup_requires` requirements to supersede installed
+  requirements. Added some new keyword arguments to existing pkg_resources
+  methods. Also had to update how __path__ is handled for namespace packages
+  to ensure that when a new egg distribution containing a namespace package is
+  placed on sys.path, the entries in __path__ are found in the same order they
+  would have been in had that egg been on the path when pkg_resources was
+  first imported.
+
+0.6.30
+------
+
+* Distribute #328: Clean up temporary directories in distribute_setup.py.
+* Fix fatal bug in distribute_setup.py.
+
+0.6.29
+------
+
+* BB Pull Request #14: Honor file permissions in zip files.
+* Distribute #327: Merged pull request #24 to fix a dependency problem with pip.
+* Merged pull request #23 to fix https://github.com/pypa/virtualenv/issues/301.
+* If Sphinx is installed, the `upload_docs` command now runs `build_sphinx`
+  to produce uploadable documentation.
+* Distribute #326: `upload_docs` provided mangled auth credentials under Python 3.
+* Distribute #320: Fix check for "createable" in distribute_setup.py.
+* Distribute #305: Remove a warning that was triggered during normal operations.
+* Distribute #311: Print metadata in UTF-8 independent of platform.
+* Distribute #303: Read manifest file with UTF-8 encoding under Python 3.
+* Distribute #301: Allow to run tests of namespace packages when using 2to3.
+* Distribute #304: Prevent import loop in site.py under Python 3.3.
+* Distribute #283: Reenable scanning of `*.pyc` / `*.pyo` files on Python 3.3.
+* Distribute #299: The develop command didn't work on Python 3, when using 2to3,
+  as the egg link would go to the Python 2 source. Linking to the 2to3'd code
+  in build/lib makes it work, although you will have to rebuild the module
+  before testing it.
+* Distribute #306: Even if 2to3 is used, we build in-place under Python 2.
+* Distribute #307: Prints the full path when .svn/entries is broken.
+* Distribute #313: Support for sdist subcommands (Python 2.7)
+* Distribute #314: test_local_index() would fail on OS X.
+* Distribute #310: Non-ascii characters in a namespace __init__.py causes errors.
+* Distribute #218: Improved documentation on behavior of `package_data` and
+  `include_package_data`. Files indicated by `package_data` are now included
+  in the manifest.
+* `distribute_setup.py` now allows a `--download-base` argument for retrieving
+  distribute from a specified location.
+
+0.6.28
+------
+
+* Distribute #294: setup.py can now be invoked from any directory.
+* Scripts are now installed honoring the umask.
+* Added support for .dist-info directories.
+* Distribute #283: Fix and disable scanning of `*.pyc` / `*.pyo` files on
+  Python 3.3.
+
+0.6.27
+------
+
+* Support current snapshots of CPython 3.3.
+* Distribute now recognizes README.rst as a standard, default readme file.
+* Exclude 'encodings' modules when removing modules from sys.modules.
+  Workaround for #285.
+* Distribute #231: Don't fiddle with system python when used with buildout
+  (bootstrap.py)
+
+0.6.26
+------
+
+* Distribute #183: Symlinked files are now extracted from source distributions.
+* Distribute #227: Easy_install fetch parameters are now passed during the
+  installation of a source distribution; now fulfillment of setup_requires
+  dependencies will honor the parameters passed to easy_install.
+
+0.6.25
+------
+
+* Distribute #258: Workaround a cache issue
+* Distribute #260: distribute_setup.py now accepts the --user parameter for
+  Python 2.6 and later.
+* Distribute #262: package_index.open_with_auth no longer throws LookupError
+  on Python 3.
+* Distribute #269: AttributeError when an exception occurs reading Manifest.in
+  on late releases of Python.
+* Distribute #272: Prevent TypeError when namespace package names are unicode
+  and single-install-externally-managed is used. Also fixes PIP issue
+  449.
+* Distribute #273: Legacy script launchers now install with Python2/3 support.
+
+0.6.24
+------
+
+* Distribute #249: Added options to exclude 2to3 fixers
+
+0.6.23
+------
+
+* Distribute #244: Fixed a test
+* Distribute #243: Fixed a test
+* Distribute #239: Fixed a test
+* Distribute #240: Fixed a test
+* Distribute #241: Fixed a test
+* Distribute #237: Fixed a test
+* Distribute #238: easy_install now uses 64bit executable wrappers on 64bit Python
+* Distribute #208: Fixed parsed_versions, it now honors post-releases as noted in the documentation
+* Distribute #207: Windows cli and gui wrappers pass CTRL-C to child python process
+* Distribute #227: easy_install now passes its arguments to setup.py bdist_egg
+* Distribute #225: Fixed a NameError on Python 2.5, 2.4
+
+0.6.21
+------
+
+* Distribute #225: Fixed a regression on py2.4
+
+0.6.20
+------
+
+* Distribute #135: Include url in warning when processing URLs in package_index.
+* Distribute #212: Fix issue where easy_install fails on Python 3 on Windows installer.
+* Distribute #213: Fix typo in documentation.
+
+0.6.19
+------
+
+* Distribute #206: AttributeError: 'HTTPMessage' object has no attribute 'getheaders'
+
+0.6.18
+------
+
+* Distribute #210: Fixed a regression introduced by Distribute #204 fix.
+
+0.6.17
+------
+
+* Support 'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT' environment
+  variable to allow to disable installation of easy_install-${version} script.
+* Support Python >=3.1.4 and >=3.2.1.
+* Distribute #204: Don't try to import the parent of a namespace package in
+  declare_namespace
+* Distribute #196: Tolerate responses with multiple Content-Length headers
+* Distribute #205: Sandboxing doesn't preserve working_set. Leads to setup_requires
+  problems.
+
+0.6.16
+------
+
+* Builds sdist gztar even on Windows (avoiding Distribute #193).
+* Distribute #192: Fixed metadata omitted on Windows when package_dir
+  specified with forward-slash.
+* Distribute #195: Cython build support.
+* Distribute #200: Issues with recognizing 64-bit packages on Windows.
+
+0.6.15
+------
+
+* Fixed typo in bdist_egg
+* Several issues under Python 3 have been solved.
+* Distribute #146: Fixed missing DLL files after easy_install of windows exe package.
+
+0.6.14
+------
+
+* Distribute #170: Fixed unittest failure. Thanks to Toshio.
+* Distribute #171: Fixed race condition in unittests causing deadlocks in test suite.
+* Distribute #143: Fixed a lookup issue with easy_install.
+  Thanks to David and Zooko.
+* Distribute #174: Fixed the edit mode when it's used with setuptools itself
+
+0.6.13
+------
+
+* Distribute #160: 2.7 gives ValueError("Invalid IPv6 URL")
+* Distribute #150: Fixed using ~/.local even in a --no-site-packages virtualenv
+* Distribute #163: scan index links before external links, and don't use the md5 when
+  comparing two distributions
+
+0.6.12
+------
+
+* Distribute #149: Fixed various failures on 2.3/2.4
+
+0.6.11
+------
+
+* Found another case of SandboxViolation - fixed
+* Distribute #15 and Distribute #48: Introduced a socket timeout of 15 seconds on url openings
+* Added indexsidebar.html into MANIFEST.in
+* Distribute #108: Fixed TypeError with Python3.1
+* Distribute #121: Fixed --help install command trying to actually install.
+* Distribute #112: Added an os.makedirs so that Tarek's solution will work.
+* Distribute #133: Added --no-find-links to easy_install
+* Added easy_install --user
+* Distribute #100: Fixed develop --user not taking '.' in PYTHONPATH into account
+* Distribute #134: removed spurious UserWarnings. Patch by VanLindberg
+* Distribute #138: cant_write_to_target error when setup_requires is used.
+* Distribute #147: respect the sys.dont_write_bytecode flag
+
+0.6.10
+------
+
+* Reverted change made for the DistributionNotFound exception because
+  zc.buildout uses the exception message to get the name of the
+  distribution.
+
+0.6.9
+-----
+
+* Distribute #90: unknown setuptools version can be added in the working set
+* Distribute #87: setup.py doesn't try to convert distribute_setup.py anymore
+  Initial Patch by arfrever.
+* Distribute #89: added a side bar with a download link to the doc.
+* Distribute #86: fixed missing sentence in pkg_resources doc.
+* Added a nicer error message when a DistributionNotFound is raised.
+* Distribute #80: test_develop now works with Python 3.1
+* Distribute #93: upload_docs now works if there is an empty sub-directory.
+* Distribute #70: exec bit on non-exec files
+* Distribute #99: now the standalone easy_install command doesn't use a
+  "setup.cfg" if any exists in the working directory. It will use it
+  only if triggered by ``install_requires`` from a setup.py call
+  (install, develop, etc).
+* Distribute #101: Allowing ``os.devnull`` in Sandbox
+* Distribute #92: Fixed the "no eggs" found error with MacPort
+  (platform.mac_ver() fails)
+* Distribute #103: test_get_script_header_jython_workaround not run
+  anymore under py3 with C or POSIX locale. Contributed by Arfrever.
+* Distribute #104: removed the assertion when the installation fails,
+  with a nicer message for the end user.
+* Distribute #100: making sure there's no SandboxViolation when
+  the setup script patches setuptools.
+
+0.6.8
+-----
+
+* Added "check_packages" in dist. (added in Setuptools 0.6c11)
+* Fixed the DONT_PATCH_SETUPTOOLS state.
+
+0.6.7
+-----
+
+* Distribute #58: Added --user support to the develop command
+* Distribute #11: Generated scripts now wrap their call to the script entry point
+  in the standard "if name == 'main'"
+* Added the 'DONT_PATCH_SETUPTOOLS' environment variable, so virtualenv
+  can drive an installation that doesn't patch a global setuptools.
+* Reviewed unladen-swallow specific change from
+  http://code.google.com/p/unladen-swallow/source/detail?spec=svn875&r=719
+  and determined that it no longer applies. Distribute should work fine with
+  Unladen Swallow 2009Q3.
+* Distribute #21: Allow PackageIndex.open_url to gracefully handle all cases of a
+  httplib.HTTPException instead of just InvalidURL and BadStatusLine.
+* Removed virtual-python.py from this distribution and updated documentation
+  to point to the actively maintained virtualenv instead.
+* Distribute #64: use_setuptools no longer rebuilds the distribute egg every
+  time it is run
+* use_setuptools now properly respects the requested version
+* use_setuptools will no longer try to import a distribute egg for the
+  wrong Python version
+* Distribute #74: no_fake should be True by default.
+* Distribute #72: avoid a bootstrapping issue with easy_install -U
+
+0.6.6
+-----
+
+* Unified the bootstrap file so it works on both py2.x and py3k without 2to3
+  (patch by Holger Krekel)
+
+0.6.5
+-----
+
+* Distribute #65: cli.exe and gui.exe are now generated at build time,
+  depending on the platform in use.
+
+* Distribute #67: Fixed doc typo (PEP 381/PEP 382).
+
+* Distribute no longer shadows setuptools if we require a 0.7-series
+  setuptools. And an error is raised when installing a 0.7 setuptools with
+  distribute.
+
+* When run from within buildout, no attempt is made to modify an existing
+  setuptools egg, whether in a shared egg directory or a system setuptools.
+
+* Fixed a hole in sandboxing allowing builtin file to write outside of
+  the sandbox.
+
+0.6.4
+-----
+
+* Added the generation of `distribute_setup_3k.py` during the release.
+  This closes Distribute #52.
+
+* Added an upload_docs command to easily upload project documentation to
+  PyPI's https://pythonhosted.org. This closes issue Distribute #56.
+
+* Fixed a bootstrap bug on the use_setuptools() API.
+
+0.6.3
+-----
+
+setuptools
+==========
+
+* Fixed a bunch of calls to file() that caused crashes on Python 3.
+
+bootstrapping
+=============
+
+* Fixed a bug in sorting that caused bootstrap to fail on Python 3.
+
+0.6.2
+-----
+
+setuptools
+==========
+
+* Added Python 3 support; see docs/python3.txt.
+  This closes Old Setuptools #39.
+
+* Added option to run 2to3 automatically when installing on Python 3.
+  This closes issue Distribute #31.
+
+* Fixed invalid usage of requirement.parse, that broke develop -d.
+  This closes Old Setuptools #44.
+
+* Fixed script launcher for 64-bit Windows.
+  This closes Old Setuptools #2.
+
+* KeyError when compiling extensions.
+  This closes Old Setuptools #41.
+
+bootstrapping
+=============
+
+* Fixed bootstrap not working on Windows. This closes issue Distribute #49.
+
+* Fixed 2.6 dependencies. This closes issue Distribute #50.
+
+* Make sure setuptools is patched when running through easy_install
+  This closes Old Setuptools #40.
+
+0.6.1
+-----
+
+setuptools
+==========
+
+* package_index.urlopen now catches BadStatusLine and malformed url errors.
+  This closes Distribute #16 and Distribute #18.
+
+* zip_ok is now False by default. This closes Old Setuptools #33.
+
+* Fixed invalid URL error catching. Old Setuptools #20.
+
+* Fixed invalid bootstrapping with easy_install installation (Distribute #40).
+  Thanks to Florian Schulze for the help.
+
+* Removed buildout/bootstrap.py. A new repository will create a specific
+  bootstrap.py script.
+
+
+bootstrapping
+=============
+
+* The bootstrap process leaves setuptools alone if detected in the system
+  and --root or --prefix is provided, but is not in the same location.
+  This closes Distribute #10.
+
+0.6
+---
+
+setuptools
+==========
+
+* Packages required at build time were not fully present at install time.
+  This closes Distribute #12.
+
+* Protected against failures in tarfile extraction. This closes Distribute #10.
+
+* Made Jython api_tests.txt doctest compatible. This closes Distribute #7.
+
+* sandbox.py replaced builtin type file with builtin function open. This
+  closes Distribute #6.
+
+* Immediately close all file handles. This closes Distribute #3.
+
+* Added compatibility with Subversion 1.6. This references Distribute #1.
+
+pkg_resources
+=============
+
+* Avoid a call to /usr/bin/sw_vers on OSX and use the official platform API
+  instead. Based on a patch from ronaldoussoren. This closes issue #5.
+
+* Fixed a SandboxViolation for mkdir that could occur in certain cases.
+  This closes Distribute #13.
+
+* Allow to find_on_path on systems with tight permissions to fail gracefully.
+  This closes Distribute #9.
+
+* Corrected inconsistency between documentation and code of add_entry.
+  This closes Distribute #8.
+
+* Immediately close all file handles. This closes Distribute #3.
+
+easy_install
+============
+
+* Immediately close all file handles. This closes Distribute #3.
+
+0.6c9
+-----
+
+ * Fixed a missing files problem when using Windows source distributions on
+   non-Windows platforms, due to distutils not handling manifest file line
+   endings correctly.
+
+ * Updated Pyrex support to work with Pyrex 0.9.6 and higher.
+
+ * Minor changes for Jython compatibility, including skipping tests that can't
+   work on Jython.
+
+ * Fixed not installing eggs in ``install_requires`` if they were also used for
+   ``setup_requires`` or ``tests_require``.
+
+ * Fixed not fetching eggs in ``install_requires`` when running tests.
+
+ * Allow ``ez_setup.use_setuptools()`` to upgrade existing setuptools
+   installations when called from a standalone ``setup.py``.
+
+ * Added a warning if a namespace package is declared, but its parent package
+   is not also declared as a namespace.
+
+ * Support Subversion 1.5
+
+ * Removed use of deprecated ``md5`` module if ``hashlib`` is available
+
+ * Fixed ``bdist_wininst upload`` trying to upload the ``.exe`` twice
+
+ * Fixed ``bdist_egg`` putting a ``native_libs.txt`` in the source package's
+   ``.egg-info``, when it should only be in the built egg's ``EGG-INFO``.
+
+ * Ensure that _full_name is set on all shared libs before extensions are
+   checked for shared lib usage.  (Fixes a bug in the experimental shared
+   library build support.)
+
+ * Fix to allow unpacked eggs containing native libraries to fail more
+   gracefully under Google App Engine (with an ``ImportError`` loading the
+   C-based module, instead of getting a ``NameError``).
+
+0.6c7
+-----
+
+ * Fixed ``distutils.filelist.findall()`` crashing on broken symlinks, and
+   ``egg_info`` command failing on new, uncommitted SVN directories.
+
+ * Fix import problems with nested namespace packages installed via
+   ``--root`` or ``--single-version-externally-managed``, due to the
+   parent package not having the child package as an attribute.
+
+0.6c6
+-----
+
+ * Added ``--egg-path`` option to ``develop`` command, allowing you to force
+   ``.egg-link`` files to use relative paths (allowing them to be shared across
+   platforms on a networked drive).
+
+ * Fix not building binary RPMs correctly.
+
+ * Fix "eggsecutables" (such as setuptools' own egg) only being runnable with
+   bash-compatible shells.
+
+ * Fix ``#!`` parsing problems in Windows ``.exe`` script wrappers, when there
+   was whitespace inside a quoted argument or at the end of the ``#!`` line
+   (a regression introduced in 0.6c4).
+
+ * Fix ``test`` command possibly failing if an older version of the project
+   being tested was installed on ``sys.path`` ahead of the test source
+   directory.
+
+ * Fix ``find_packages()`` treating ``ez_setup`` and directories with ``.`` in
+   their names as packages.
+
+0.6c5
+-----
+
+ * Fix uploaded ``bdist_rpm`` packages being described as ``bdist_egg``
+   packages under Python versions less than 2.5.
+
+ * Fix uploaded ``bdist_wininst`` packages being described as suitable for
+   "any" version by Python 2.5, even if a ``--target-version`` was specified.
+
+0.6c4
+-----
+
+ * Overhauled Windows script wrapping to support ``bdist_wininst`` better.
+   Scripts installed with ``bdist_wininst`` will always use ``#!python.exe`` or
+   ``#!pythonw.exe`` as the executable name (even when built on non-Windows
+   platforms!), and the wrappers will look for the executable in the script's
+   parent directory (which should find the right version of Python).
+
+ * Fix ``upload`` command not uploading files built by ``bdist_rpm`` or
+   ``bdist_wininst`` under Python 2.3 and 2.4.
+
+ * Add support for "eggsecutable" headers: a ``#!/bin/sh`` script that is
+   prepended to an ``.egg`` file to allow it to be run as a script on Unix-ish
+   platforms.  (This is mainly so that setuptools itself can have a single-file
+   installer on Unix, without doing multiple downloads, dealing with firewalls,
+   etc.)
+
+ * Fix problem with empty revision numbers in Subversion 1.4 ``entries`` files
+
+ * Use cross-platform relative paths in ``easy-install.pth`` when doing
+   ``develop`` and the source directory is a subdirectory of the installation
+   target directory.
+
+ * Fix a problem installing eggs with a system packaging tool if the project
+   contained an implicit namespace package; for example if the ``setup()``
+   listed a namespace package ``foo.bar`` without explicitly listing ``foo``
+   as a namespace package.
+
+0.6c3
+-----
+
+ * Fixed breakages caused by Subversion 1.4's new "working copy" format
+
+0.6c2
+-----
+
+ * The ``ez_setup`` module displays the conflicting version of setuptools (and
+   its installation location) when a script requests a version that's not
+   available.
+
+ * Running ``setup.py develop`` on a setuptools-using project will now install
+   setuptools if needed, instead of only downloading the egg.
+
+0.6c1
+-----
+
+ * Fixed ``AttributeError`` when trying to download a ``setup_requires``
+   dependency when a distribution lacks a ``dependency_links`` setting.
+
+ * Made ``zip-safe`` and ``not-zip-safe`` flag files contain a single byte, so
+   as to play better with packaging tools that complain about zero-length
+   files.
+
+ * Made ``setup.py develop`` respect the ``--no-deps`` option, which it
+   previously was ignoring.
+
+ * Support ``extra_path`` option to ``setup()`` when ``install`` is run in
+   backward-compatibility mode.
+
+ * Source distributions now always include a ``setup.cfg`` file that explicitly
+   sets ``egg_info`` options such that they produce an identical version number
+   to the source distribution's version number.  (Previously, the default
+   version number could be different due to the use of ``--tag-date``, or if
+   the version was overridden on the command line that built the source
+   distribution.)
+
+0.6b4
+-----
+
+ * Fix ``register`` not obeying name/version set by ``egg_info`` command, if
+   ``egg_info`` wasn't explicitly run first on the same command line.
+
+ * Added ``--no-date`` and ``--no-svn-revision`` options to ``egg_info``
+   command, to allow suppressing tags configured in ``setup.cfg``.
+
+ * Fixed redundant warnings about missing ``README`` file(s); the warning
+   should now appear only if your project is actually a source distribution.
+
+0.6b3
+-----
+
+ * Fix ``bdist_egg`` not including files in subdirectories of ``.egg-info``.
+
+ * Allow ``.py`` files found by the ``include_package_data`` option to be
+   automatically included. Remove duplicate data file matches if both
+   ``include_package_data`` and ``package_data`` are used to refer to the same
+   files.
+
+0.6b1
+-----
+
+ * Strip ``module`` from the end of compiled extension modules when computing
+   the name of a ``.py`` loader/wrapper.  (Python's import machinery ignores
+   this suffix when searching for an extension module.)
+
+0.6a11
+------
+
+ * Added ``test_loader`` keyword to support custom test loaders
+
+ * Added ``setuptools.file_finders`` entry point group to allow implementing
+   revision control plugins.
+
+ * Added ``--identity`` option to ``upload`` command.
+
+ * Added ``dependency_links`` to allow specifying URLs for ``--find-links``.
+
+ * Enhanced test loader to scan packages as well as modules, and call
+   ``additional_tests()`` if present to get non-unittest tests.
+
+ * Support namespace packages in conjunction with system packagers, by omitting
+   the installation of any ``__init__.py`` files for namespace packages, and
+   adding a special ``.pth`` file to create a working package in
+   ``sys.modules``.
+
+ * Made ``--single-version-externally-managed`` automatic when ``--root`` is
+   used, so that most system packagers won't require special support for
+   setuptools.
+
+ * Fixed ``setup_requires``, ``tests_require``, etc. not using ``setup.cfg`` or
+   other configuration files for their option defaults when installing, and
+   also made the install use ``--multi-version`` mode so that the project
+   directory doesn't need to support .pth files.
+
+ * ``MANIFEST.in`` is now forcibly closed when any errors occur while reading
+   it. Previously, the file could be left open and the actual error would be
+   masked by problems trying to remove the open file on Windows systems.
+
+0.6a10
+------
+
+ * Fixed the ``develop`` command ignoring ``--find-links``.
+
+0.6a9
+-----
+
+ * The ``sdist`` command no longer uses the traditional ``MANIFEST`` file to
+   create source distributions.  ``MANIFEST.in`` is still read and processed,
+   as are the standard defaults and pruning. But the manifest is built inside
+   the project's ``.egg-info`` directory as ``SOURCES.txt``, and it is rebuilt
+   every time the ``egg_info`` command is run.
+
+ * Added the ``include_package_data`` keyword to ``setup()``, allowing you to
+   automatically include any package data listed in revision control or
+   ``MANIFEST.in``
+
+ * Added the ``exclude_package_data`` keyword to ``setup()``, allowing you to
+   trim back files included via the ``package_data`` and
+   ``include_package_data`` options.
+
+ * Fixed ``--tag-svn-revision`` not working when run from a source
+   distribution.
+
+ * Added warning for namespace packages with missing ``declare_namespace()``
+
+ * Added ``tests_require`` keyword to ``setup()``, so that e.g. packages
+   requiring ``nose`` to run unit tests can make this dependency optional
+   unless the ``test`` command is run.
+
+ * Made all commands that use ``easy_install`` respect its configuration
+   options, as this was causing some problems with ``setup.py install``.
+
+ * Added an ``unpack_directory()`` driver to ``setuptools.archive_util``, so
+   that you can process a directory tree through a processing filter as if it
+   were a zipfile or tarfile.
+
+ * Added an internal ``install_egg_info`` command to use as part of old-style
+   ``install`` operations, that installs an ``.egg-info`` directory with the
+   package.
+
+ * Added a ``--single-version-externally-managed`` option to the ``install``
+   command so that you can more easily wrap a "flat" egg in a system package.
+
+ * Enhanced ``bdist_rpm`` so that it installs single-version eggs that
+   don't rely on a ``.pth`` file. The ``--no-egg`` option has been removed,
+   since all RPMs are now built in a more backwards-compatible format.
+
+ * Support full roundtrip translation of eggs to and from ``bdist_wininst``
+   format. Running ``bdist_wininst`` on a setuptools-based package wraps the
+   egg in an .exe that will safely install it as an egg (i.e., with metadata
+   and entry-point wrapper scripts), and ``easy_install`` can turn the .exe
+   back into an ``.egg`` file or directory and install it as such.
+
+
+0.6a8
+-----
+
+ * Fixed some problems building extensions when Pyrex was installed, especially
+   with Python 2.4 and/or packages using SWIG.
+
+ * Made ``develop`` command accept all the same options as ``easy_install``,
+   and use the ``easy_install`` command's configuration settings as defaults.
+
+ * Made ``egg_info --tag-svn-revision`` fall back to extracting the revision
+   number from ``PKG-INFO`` in case it is being run on a source distribution of
+   a snapshot taken from a Subversion-based project.
+
+ * Automatically detect ``.dll``, ``.so`` and ``.dylib`` files that are being
+   installed as data, adding them to ``native_libs.txt`` automatically.
+
+ * Fixed some problems with fresh checkouts of projects that don't include
+   ``.egg-info/PKG-INFO`` under revision control and put the project's source
+   code directly in the project directory. If such a package had any
+   requirements that get processed before the ``egg_info`` command can be run,
+   the setup scripts would fail with a "Missing 'Version:' header and/or
+   PKG-INFO file" error, because the egg runtime interpreted the unbuilt
+   metadata in a directory on ``sys.path`` (i.e. the current directory) as
+   being a corrupted egg. Setuptools now monkeypatches the distribution
+   metadata cache to pretend that the egg has valid version information, until
+   it has a chance to make it actually be so (via the ``egg_info`` command).
+
+0.6a5
+-----
+
+ * Fixed missing gui/cli .exe files in distribution. Fixed bugs in tests.
+
+0.6a3
+-----
+
+ * Added ``gui_scripts`` entry point group to allow installing GUI scripts
+   on Windows and other platforms.  (The special handling is only for Windows;
+   other platforms are treated the same as for ``console_scripts``.)
+
+0.6a2
+-----
+
+ * Added ``console_scripts`` entry point group to allow installing scripts
+   without the need to create separate script files. On Windows, console
+   scripts get an ``.exe`` wrapper so you can just type their name. On other
+   platforms, the scripts are written without a file extension.
+
+0.6a1
+-----
+
+ * Added support for building "old-style" RPMs that don't install an egg for
+   the target package, using a ``--no-egg`` option.
+
+ * The ``build_ext`` command now works better when using the ``--inplace``
+   option and multiple Python versions. It now makes sure that all extensions
+   match the current Python version, even if newer copies were built for a
+   different Python version.
+
+ * The ``upload`` command no longer attaches an extra ``.zip`` when uploading
+   eggs, as PyPI now supports egg uploads without trickery.
+
+ * The ``ez_setup`` script/module now displays a warning before downloading
+   the setuptools egg, and attempts to check the downloaded egg against an
+   internal MD5 checksum table.
+
+ * Fixed the ``--tag-svn-revision`` option of ``egg_info`` not finding the
+   latest revision number; it was using the revision number of the directory
+   containing ``setup.py``, not the highest revision number in the project.
+
+ * Added ``eager_resources`` setup argument
+
+ * The ``sdist`` command now recognizes Subversion "deleted file" entries and
+   does not include them in source distributions.
+
+ * ``setuptools`` now embeds itself more thoroughly into the distutils, so that
+   other distutils extensions (e.g. py2exe, py2app) will subclass setuptools'
+   versions of things, rather than the native distutils ones.
+
+ * Added ``entry_points`` and ``setup_requires`` arguments to ``setup()``;
+   ``setup_requires`` allows you to automatically find and download packages
+   that are needed in order to *build* your project (as opposed to running it).
+
+ * ``setuptools`` now finds its commands, ``setup()`` argument validators, and
+   metadata writers using entry points, so that they can be extended by
+   third-party packages. See `Creating distutils Extensions
+   <https://setuptools.readthedocs.io/en/latest/setuptools.html#creating-distutils-extensions>`_
+   for more details.
+
+ * The vestigial ``depends`` command has been removed. It was never finished
+   or documented, and never would have worked without EasyInstall - which it
+   pre-dated and was never compatible with.
+
+0.5a12
+------
+
+ * The zip-safety scanner now checks for modules that might be used with
+   ``python -m``, and marks them as unsafe for zipping, since Python 2.4 can't
+   handle ``-m`` on zipped modules.
+
+0.5a11
+------
+
+ * Fix breakage of the "develop" command that was caused by the addition of
+   ``--always-unzip`` to the ``easy_install`` command.
+
+0.5a9
+-----
+
+ * Include ``svn:externals`` directories in source distributions as well as
+   normal subversion-controlled files and directories.
+
+ * Added ``exclude=patternlist`` option to ``setuptools.find_packages()``
+
+ * Changed ``--tag-svn-revision`` to include an "r" in front of the revision
+   number for better readability.
+
+ * Added ability to build eggs without including source files (except for any
+   scripts, of course), using the ``--exclude-source-files`` option to
+   ``bdist_egg``.
+
+ * ``setup.py install`` now automatically detects when an "unmanaged" package
+   or module is going to be on ``sys.path`` ahead of a package being installed,
+   thereby preventing the newer version from being imported. If this occurs,
+   a warning message is output to ``sys.stderr``, but installation proceeds
+   anyway. The warning message informs the user what files or directories
+   need deleting, and advises them they can also use EasyInstall (with the
+   ``--delete-conflicting`` option) to do it automatically.
+
+ * The ``egg_info`` command now adds a ``top_level.txt`` file to the metadata
+   directory that lists all top-level modules and packages in the distribution.
+   This is used by the ``easy_install`` command to find possibly-conflicting
+   "unmanaged" packages when installing the distribution.
+
+ * Added ``zip_safe`` and ``namespace_packages`` arguments to ``setup()``.
+   Added package analysis to determine zip-safety if the ``zip_safe`` flag
+   is not given, and advise the author regarding what code might need changing.
+
+ * Fixed the swapped ``-d`` and ``-b`` options of ``bdist_egg``.
+
+0.5a8
+-----
+
+ * The "egg_info" command now always sets the distribution metadata to "safe"
+   forms of the distribution name and version, so that distribution files will
+   be generated with parseable names (i.e., ones that don't include '-' in the
+   name or version). Also, this means that if you use the various ``--tag``
+   options of "egg_info", any distributions generated will use the tags in the
+   version, not just egg distributions.
+
+ * Added support for defining command aliases in distutils configuration files,
+   under the "[aliases]" section. To prevent recursion and to allow aliases to
+   call the command of the same name, a given alias can be expanded only once
+   per command-line invocation. You can define new aliases with the "alias"
+   command, either for the local, global, or per-user configuration.
+
+ * Added "rotate" command to delete old distribution files, given a set of
+   patterns to match and the number of files to keep.  (Keeps the most
+   recently-modified distribution files matching each pattern.)
+
+ * Added "saveopts" command that saves all command-line options for the current
+   invocation to the local, global, or per-user configuration file. Useful for
+   setting defaults without having to hand-edit a configuration file.
+
+ * Added a "setopt" command that sets a single option in a specified distutils
+   configuration file.
+
+0.5a7
+-----
+
+ * Added "upload" support for egg and source distributions, including a bug
+   fix for "upload" and a temporary workaround for lack of .egg support in
+   PyPI.
+
+0.5a6
+-----
+
+ * Beefed up the "sdist" command so that if you don't have a MANIFEST.in, it
+   will include all files under revision control (CVS or Subversion) in the
+   current directory, and it will regenerate the list every time you create a
+   source distribution, not just when you tell it to. This should make the
+   default "do what you mean" more often than the distutils' default behavior
+   did, while still retaining the old behavior in the presence of MANIFEST.in.
+
+ * Fixed the "develop" command always updating .pth files, even if you
+   specified ``-n`` or ``--dry-run``.
+
+ * Slightly changed the format of the generated version when you use
+   ``--tag-build`` on the "egg_info" command, so that you can make tagged
+   revisions compare *lower* than the version specified in setup.py (e.g. by
+   using ``--tag-build=dev``).
+
+0.5a5
+-----
+
+ * Added ``develop`` command to ``setuptools``-based packages. This command
+   installs an ``.egg-link`` pointing to the package's source directory, and
+   script wrappers that ``execfile()`` the source versions of the package's
+   scripts. This lets you put your development checkout(s) on sys.path without
+   having to actually install them.  (To uninstall the link, use
+   ``setup.py develop --uninstall``.)
+
+ * Added ``egg_info`` command to ``setuptools``-based packages. This command
+   just creates or updates the "projectname.egg-info" directory, without
+   building an egg.  (It's used by the ``bdist_egg``, ``test``, and ``develop``
+   commands.)
+
+ * Enhanced the ``test`` command so that it doesn't install the package, but
+   instead builds any C extensions in-place, updates the ``.egg-info``
+   metadata, adds the source directory to ``sys.path``, and runs the tests
+   directly on the source. This avoids an "unmanaged" installation of the
+   package to ``site-packages`` or elsewhere.
+
+ * Made ``easy_install`` a standard ``setuptools`` command, moving it from
+   the ``easy_install`` module to ``setuptools.command.easy_install``. Note
+   that if you were importing or extending it, you must now change your imports
+   accordingly.  ``easy_install.py`` is still installed as a script, but not as
+   a module.
+
+0.5a4
+-----
+
+ * Setup scripts using setuptools can now list their dependencies directly in
+   the setup.py file, without having to manually create a ``depends.txt`` file.
+   The ``install_requires`` and ``extras_require`` arguments to ``setup()``
+   are used to create a dependencies file automatically. If you are manually
+   creating ``depends.txt`` right now, please switch to using these setup
+   arguments as soon as practical, because ``depends.txt`` support will be
+   removed in the 0.6 release cycle. For documentation on the new arguments,
+   see the ``setuptools.dist.Distribution`` class.
+
+ * Setup scripts using setuptools now always install using ``easy_install``
+   internally, for ease of uninstallation and upgrading.
+
+0.5a1
+-----
+
+ * Added support for "self-installation" bootstrapping. Packages can now
+   include ``ez_setup.py`` in their source distribution, and add the following
+   to their ``setup.py``, in order to automatically bootstrap installation of
+   setuptools as part of their setup process::
+
+    from ez_setup import use_setuptools
+    use_setuptools()
+
+    from setuptools import setup
+    # etc...
+
+0.4a2
+-----
+
+ * Added ``ez_setup.py`` installer/bootstrap script to make initial setuptools
+   installation easier, and to allow distributions using setuptools to avoid
+   having to include setuptools in their source distribution.
+
+ * All downloads are now managed by the ``PackageIndex`` class (which is now
+   subclassable and replaceable), so that embedders can more easily override
+   download logic, give download progress reports, etc. The class has also
+   been moved to the new ``setuptools.package_index`` module.
+
+ * The ``Installer`` class no longer handles downloading, manages a temporary
+   directory, or tracks the ``zip_ok`` option. Downloading is now handled
+   by ``PackageIndex``, and ``Installer`` has become an ``easy_install``
+   command class based on ``setuptools.Command``.
+
+ * There is a new ``setuptools.sandbox.run_setup()`` API to invoke a setup
+   script in a directory sandbox, and a new ``setuptools.archive_util`` module
+   with an ``unpack_archive()`` API. These were split out of EasyInstall to
+   allow reuse by other tools and applications.
+
+ * ``setuptools.Command`` now supports reinitializing commands using keyword
+   arguments to set/reset options. Also, ``Command`` subclasses can now set
+   their ``command_consumes_arguments`` attribute to ``True`` in order to
+   receive an ``args`` option containing the rest of the command line.
+
+0.3a2
+-----
+
+ * Added new options to ``bdist_egg`` to allow tagging the egg's version number
+   with a subversion revision number, the current date, or an explicit tag
+   value. Run ``setup.py bdist_egg --help`` to get more information.
+
+ * Misc. bug fixes
+
+0.3a1
+-----
+
+ * Initial release.
+
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..6e0693b
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,19 @@
+Copyright (C) 2016 Jason R Coombs <jaraco@jaraco.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..325bbed
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,14 @@
+recursive-include setuptools *.py *.exe *.xml
+recursive-include tests *.py
+recursive-include setuptools/tests *.html
+recursive-include docs *.py *.txt *.conf *.css *.css_t Makefile indexsidebar.html
+recursive-include setuptools/_vendor *
+recursive-include pkg_resources *.py *.txt
+include *.py
+include *.rst
+include MANIFEST.in
+include LICENSE
+include launcher.c
+include msvc-build-launcher.cmd
+include pytest.ini
+include tox.ini
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..9556624
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,16 @@
+name: "setuptools"
+description:
+    ""
+
+third_party {
+  url {
+    type: HOMEPAGE
+    value: "https://pypi.org/project/setuptools/"
+  }
+  url {
+    type: ARCHIVE
+    value: "https://files.pythonhosted.org/packages/a6/5b/f399fcffb9128d642387133dc3aa9bb81f127b949cd4d9f63e5602ad1d71/setuptools-39.1.0.zip"
+  }
+  version: "v39.1.0"
+  last_upgrade_date { year: 2018 month: 5 day: 23 }
+}
diff --git a/MODULE_LICENSE_MIT b/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_MIT
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000..6e0693b
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,19 @@
+Copyright (C) 2016 Jason R Coombs <jaraco@jaraco.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644
index 0000000..51e6da6
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,65 @@
+Metadata-Version: 2.1
+Name: setuptools
+Version: 39.1.0
+Summary: Easily download, build, install, upgrade, and uninstall Python packages
+Home-page: https://github.com/pypa/setuptools
+Author: Python Packaging Authority
+Author-email: distutils-sig@python.org
+License: UNKNOWN
+Project-URL: Documentation, https://setuptools.readthedocs.io/
+Description: .. image:: https://img.shields.io/pypi/v/setuptools.svg
+           :target: https://pypi.org/project/setuptools
+        
+        .. image:: https://readthedocs.org/projects/setuptools/badge/?version=latest
+            :target: https://setuptools.readthedocs.io
+        
+        .. image:: https://img.shields.io/travis/pypa/setuptools/master.svg?label=Linux%20build%20%40%20Travis%20CI
+           :target: https://travis-ci.org/pypa/setuptools
+        
+        .. image:: https://img.shields.io/appveyor/ci/jaraco/setuptools/master.svg?label=Windows%20build%20%40%20Appveyor
+           :target: https://ci.appveyor.com/project/jaraco/setuptools/branch/master
+        
+        .. image:: https://img.shields.io/pypi/pyversions/setuptools.svg
+        
+        See the `Installation Instructions
+        <https://packaging.python.org/installing/>`_ in the Python Packaging
+        User's Guide for instructions on installing, upgrading, and uninstalling
+        Setuptools.
+        
+        The project is `maintained at GitHub <https://github.com/pypa/setuptools>`_.
+        
+        Questions and comments should be directed to the `distutils-sig
+        mailing list <http://mail.python.org/pipermail/distutils-sig/>`_.
+        Bug reports and especially tested patches may be
+        submitted directly to the `bug tracker
+        <https://github.com/pypa/setuptools/issues>`_.
+        
+        
+        Code of Conduct
+        ---------------
+        
+        Everyone interacting in the setuptools project's codebases, issue trackers,
+        chat rooms, and mailing lists is expected to follow the
+        `PyPA Code of Conduct <https://www.pypa.io/en/latest/code-of-conduct/>`_.
+        
+Keywords: CPAN PyPI distutils eggs package management
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: System :: Archiving :: Packaging
+Classifier: Topic :: System :: Systems Administration
+Classifier: Topic :: Utilities
+Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*
+Description-Content-Type: text/x-rst; charset=UTF-8
+Provides-Extra: ssl
+Provides-Extra: certs
diff --git a/README.rst b/README.rst
new file mode 100755
index 0000000..f754d96
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,34 @@
+.. image:: https://img.shields.io/pypi/v/setuptools.svg
+   :target: https://pypi.org/project/setuptools
+
+.. image:: https://readthedocs.org/projects/setuptools/badge/?version=latest
+    :target: https://setuptools.readthedocs.io
+
+.. image:: https://img.shields.io/travis/pypa/setuptools/master.svg?label=Linux%20build%20%40%20Travis%20CI
+   :target: https://travis-ci.org/pypa/setuptools
+
+.. image:: https://img.shields.io/appveyor/ci/jaraco/setuptools/master.svg?label=Windows%20build%20%40%20Appveyor
+   :target: https://ci.appveyor.com/project/jaraco/setuptools/branch/master
+
+.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg
+
+See the `Installation Instructions
+<https://packaging.python.org/installing/>`_ in the Python Packaging
+User's Guide for instructions on installing, upgrading, and uninstalling
+Setuptools.
+
+The project is `maintained at GitHub <https://github.com/pypa/setuptools>`_.
+
+Questions and comments should be directed to the `distutils-sig
+mailing list <http://mail.python.org/pipermail/distutils-sig/>`_.
+Bug reports and especially tested patches may be
+submitted directly to the `bug tracker
+<https://github.com/pypa/setuptools/issues>`_.
+
+
+Code of Conduct
+---------------
+
+Everyone interacting in the setuptools project's codebases, issue trackers,
+chat rooms, and mailing lists is expected to follow the
+`PyPA Code of Conduct <https://www.pypa.io/en/latest/code-of-conduct/>`_.
diff --git a/bootstrap.py b/bootstrap.py
new file mode 100644
index 0000000..8c7d7fc
--- /dev/null
+++ b/bootstrap.py
@@ -0,0 +1,64 @@
+"""
+If setuptools is not already installed in the environment, it's not possible
+to invoke setuptools' own commands. This routine will bootstrap this local
+environment by creating a minimal egg-info directory and then invoking the
+egg-info command to flesh out the egg-info directory.
+"""
+
+from __future__ import unicode_literals
+
+import os
+import sys
+import textwrap
+import subprocess
+import io
+
+
+minimal_egg_info = textwrap.dedent("""
+    [distutils.commands]
+    egg_info = setuptools.command.egg_info:egg_info
+
+    [distutils.setup_keywords]
+    include_package_data = setuptools.dist:assert_bool
+    install_requires = setuptools.dist:check_requirements
+    extras_require = setuptools.dist:check_extras
+    entry_points = setuptools.dist:check_entry_points
+
+    [egg_info.writers]
+    dependency_links.txt = setuptools.command.egg_info:overwrite_arg
+    entry_points.txt = setuptools.command.egg_info:write_entries
+    requires.txt = setuptools.command.egg_info:write_requirements
+    """)
+
+
+def ensure_egg_info():
+    if os.path.exists('setuptools.egg-info'):
+        return
+    print("adding minimal entry_points")
+    build_egg_info()
+
+
+def build_egg_info():
+    """
+    Build a minimal egg-info, enough to invoke egg_info
+    """
+
+    os.mkdir('setuptools.egg-info')
+    with io.open('setuptools.egg-info/entry_points.txt', 'w') as ep:
+        ep.write(minimal_egg_info)
+
+
+def run_egg_info():
+    cmd = [sys.executable, 'setup.py', 'egg_info']
+    print("Regenerating egg_info")
+    subprocess.check_call(cmd)
+    print("...and again.")
+    subprocess.check_call(cmd)
+
+
+def main():
+    ensure_egg_info()
+    run_egg_info()
+
+
+__name__ == '__main__' and main()
diff --git a/conftest.py b/conftest.py
new file mode 100644
index 0000000..3cccfe1
--- /dev/null
+++ b/conftest.py
@@ -0,0 +1,8 @@
+pytest_plugins = 'setuptools.tests.fixtures'
+
+
+def pytest_addoption(parser):
+    parser.addoption(
+        "--package_name", action="append", default=[],
+        help="list of package_name to pass to test functions",
+    )
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..30bf10a
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,75 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html web pickle htmlhelp latex changes linkcheck
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html      to make standalone HTML files"
+	@echo "  pickle    to make pickle files"
+	@echo "  json      to make JSON files"
+	@echo "  htmlhelp  to make HTML files and a HTML help project"
+	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  changes   to make an overview over all changed/added/deprecated items"
+	@echo "  linkcheck to check all external links for integrity"
+
+clean:
+	-rm -rf build/*
+
+html:
+	mkdir -p build/html build/doctrees
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html
+	@echo
+	@echo "Build finished. The HTML pages are in build/html."
+
+pickle:
+	mkdir -p build/pickle build/doctrees
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+web: pickle
+
+json:
+	mkdir -p build/json build/doctrees
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) build/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	mkdir -p build/htmlhelp build/doctrees
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in build/htmlhelp."
+
+latex:
+	mkdir -p build/latex build/doctrees
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in build/latex."
+	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+	      "run these through (pdf)latex."
+
+changes:
+	mkdir -p build/changes build/doctrees
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes
+	@echo
+	@echo "The overview file is in build/changes."
+
+linkcheck:
+	mkdir -p build/linkcheck build/doctrees
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in build/linkcheck/output.txt."
diff --git a/docs/_templates/indexsidebar.html b/docs/_templates/indexsidebar.html
new file mode 100644
index 0000000..80002d0
--- /dev/null
+++ b/docs/_templates/indexsidebar.html
@@ -0,0 +1,8 @@
+<h3>Download</h3>
+
+<p>Current version: <b>{{ version }}</b></p>
+<p>Get Setuptools from the <a href="https://pypi.org/project/setuptools/"> Python Package Index</a></p>
+
+<h3>Questions? Suggestions? Contributions?</h3>
+
+<p>Visit the <a href="https://github.com/pypa/setuptools">Setuptools project page</a> </p>
diff --git a/docs/_theme/nature/static/nature.css_t b/docs/_theme/nature/static/nature.css_t
new file mode 100644
index 0000000..1a65426
--- /dev/null
+++ b/docs/_theme/nature/static/nature.css_t
@@ -0,0 +1,237 @@
+/**
+ * Sphinx stylesheet -- default theme
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+ 
+@import url("basic.css");
+ 
+/* -- page layout ----------------------------------------------------------- */
+ 
+body {
+    font-family: Arial, sans-serif;
+    font-size: 100%;
+    background-color: #111111;
+    color: #555555;
+    margin: 0;
+    padding: 0;
+}
+
+div.documentwrapper {
+    float: left;
+    width: 100%;
+}
+
+div.bodywrapper {
+    margin: 0 0 0 300px;
+}
+
+hr{
+    border: 1px solid #B1B4B6;
+}
+ 
+div.document {
+    background-color: #fafafa;
+}
+ 
+div.body {
+    background-color: #ffffff;
+    color: #3E4349;
+    padding: 1em 30px 30px 30px;
+    font-size: 0.9em;
+}
+ 
+div.footer {
+    color: #555;
+    width: 100%;
+    padding: 13px 0;
+    text-align: center;
+    font-size: 75%;
+}
+ 
+div.footer a {
+    color: #444444;
+}
+ 
+div.related {
+    background-color: #6BA81E;
+    line-height: 36px;
+    color: #ffffff;
+    text-shadow: 0px 1px 0 #444444;
+    font-size: 1.1em;
+}
+ 
+div.related a {
+    color: #E2F3CC;
+}
+
+div.related .right {
+    font-size: 0.9em;
+}
+
+div.sphinxsidebar {
+    font-size: 0.9em;
+    line-height: 1.5em;
+    width: 300px;
+}
+
+div.sphinxsidebarwrapper{
+    padding: 20px 0;
+}
+ 
+div.sphinxsidebar h3,
+div.sphinxsidebar h4 {
+    font-family: Arial, sans-serif;
+    color: #222222;
+    font-size: 1.2em;
+    font-weight: bold;
+    margin: 0;
+    padding: 5px 10px;
+    text-shadow: 1px 1px 0 white
+}
+
+div.sphinxsidebar h3 a {
+    color: #444444;
+}
+
+div.sphinxsidebar p {
+    color: #888888;
+    padding: 5px 20px;
+    margin: 0.5em 0px;
+}
+ 
+div.sphinxsidebar p.topless {
+}
+ 
+div.sphinxsidebar ul {
+    margin: 10px 10px 10px 20px;
+    padding: 0;
+    color: #000000;
+}
+ 
+div.sphinxsidebar a {
+    color: #444444;
+}
+
+div.sphinxsidebar a:hover {
+    color: #E32E00;
+}
+
+div.sphinxsidebar input {
+    border: 1px solid #cccccc;
+    font-family: sans-serif;
+    font-size: 1.1em;
+    padding: 0.15em 0.3em;
+}
+
+div.sphinxsidebar input[type=text]{
+    margin-left: 20px;
+}
+ 
+/* -- body styles ----------------------------------------------------------- */
+ 
+a {
+    color: #005B81;
+    text-decoration: none;
+}
+ 
+a:hover {
+    color: #E32E00;
+}
+ 
+div.body h1,
+div.body h2,
+div.body h3,
+div.body h4,
+div.body h5,
+div.body h6 {
+    font-family: Arial, sans-serif;
+    font-weight: normal;
+    color: #212224;
+    margin: 30px 0px 10px 0px;
+    padding: 5px 0 5px 0px;
+    text-shadow: 0px 1px 0 white;
+    border-bottom: 1px solid #C8D5E3;
+}
+ 
+div.body h1 { margin-top: 0; font-size: 200%; }
+div.body h2 { font-size: 150%; }
+div.body h3 { font-size: 120%; }
+div.body h4 { font-size: 110%; }
+div.body h5 { font-size: 100%; }
+div.body h6 { font-size: 100%; }
+ 
+a.headerlink {
+    color: #c60f0f;
+    font-size: 0.8em;
+    padding: 0 4px 0 4px;
+    text-decoration: none;
+}
+ 
+a.headerlink:hover {
+    background-color: #c60f0f;
+    color: white;
+}
+ 
+div.body p, div.body dd, div.body li {
+    line-height: 1.8em;
+}
+ 
+div.admonition p.admonition-title + p {
+    display: inline;
+}
+
+div.highlight{
+    background-color: white;
+}
+
+div.note {
+    background-color: #eeeeee;
+    border: 1px solid #cccccc;
+}
+ 
+div.seealso {
+    background-color: #ffffcc;
+    border: 1px solid #ffff66;
+}
+ 
+div.topic {
+    background-color: #fafafa;
+    border-width: 0;
+}
+ 
+div.warning {
+    background-color: #ffe4e4;
+    border: 1px solid #ff6666;
+}
+ 
+p.admonition-title {
+    display: inline;
+}
+ 
+p.admonition-title:after {
+    content: ":";
+}
+ 
+pre {
+    padding: 10px;
+    background-color: #fafafa;
+    color: #222222;
+    line-height: 1.5em;
+    font-size: 1.1em;
+    margin: 1.5em 0 1.5em 0;
+    -webkit-box-shadow: 0px 0px 4px #d8d8d8;
+    -moz-box-shadow: 0px 0px 4px #d8d8d8;
+    box-shadow: 0px 0px 4px #d8d8d8;
+}
+ 
+tt {
+    color: #222222;
+    padding: 1px 2px;
+    font-size: 1.2em;
+    font-family: monospace;
+}
+
+#table-of-contents ul {
+    padding-left: 2em;
+}
+
diff --git a/docs/_theme/nature/static/pygments.css b/docs/_theme/nature/static/pygments.css
new file mode 100644
index 0000000..652b761
--- /dev/null
+++ b/docs/_theme/nature/static/pygments.css
@@ -0,0 +1,54 @@
+.c { color: #999988; font-style: italic } /* Comment */
+.k { font-weight: bold } /* Keyword */
+.o { font-weight: bold } /* Operator */
+.cm { color: #999988; font-style: italic } /* Comment.Multiline */
+.cp { color: #999999; font-weight: bold } /* Comment.Preproc */
+.c1 { color: #999988; font-style: italic } /* Comment.Single */
+.gd { color: #000000; background-color: #ffdddd } /* Generic.Deleted */
+.ge { font-style: italic } /* Generic.Emph */
+.gr { color: #aa0000 } /* Generic.Error */
+.gh { color: #999999 } /* Generic.Heading */
+.gi { color: #000000; background-color: #ddffdd } /* Generic.Inserted */
+.go { color: #111 } /* Generic.Output */
+.gp { color: #555555 } /* Generic.Prompt */
+.gs { font-weight: bold } /* Generic.Strong */
+.gu { color: #aaaaaa } /* Generic.Subheading */
+.gt { color: #aa0000 } /* Generic.Traceback */
+.kc { font-weight: bold } /* Keyword.Constant */
+.kd { font-weight: bold } /* Keyword.Declaration */
+.kp { font-weight: bold } /* Keyword.Pseudo */
+.kr { font-weight: bold } /* Keyword.Reserved */
+.kt { color: #445588; font-weight: bold } /* Keyword.Type */
+.m { color: #009999 } /* Literal.Number */
+.s { color: #bb8844 } /* Literal.String */
+.na { color: #008080 } /* Name.Attribute */
+.nb { color: #999999 } /* Name.Builtin */
+.nc { color: #445588; font-weight: bold } /* Name.Class */
+.no { color: #ff99ff } /* Name.Constant */
+.ni { color: #800080 } /* Name.Entity */
+.ne { color: #990000; font-weight: bold } /* Name.Exception */
+.nf { color: #990000; font-weight: bold } /* Name.Function */
+.nn { color: #555555 } /* Name.Namespace */
+.nt { color: #000080 } /* Name.Tag */
+.nv { color: purple } /* Name.Variable */
+.ow { font-weight: bold } /* Operator.Word */
+.mf { color: #009999 } /* Literal.Number.Float */
+.mh { color: #009999 } /* Literal.Number.Hex */
+.mi { color: #009999 } /* Literal.Number.Integer */
+.mo { color: #009999 } /* Literal.Number.Oct */
+.sb { color: #bb8844 } /* Literal.String.Backtick */
+.sc { color: #bb8844 } /* Literal.String.Char */
+.sd { color: #bb8844 } /* Literal.String.Doc */
+.s2 { color: #bb8844 } /* Literal.String.Double */
+.se { color: #bb8844 } /* Literal.String.Escape */
+.sh { color: #bb8844 } /* Literal.String.Heredoc */
+.si { color: #bb8844 } /* Literal.String.Interpol */
+.sx { color: #bb8844 } /* Literal.String.Other */
+.sr { color: #808000 } /* Literal.String.Regex */
+.s1 { color: #bb8844 } /* Literal.String.Single */
+.ss { color: #bb8844 } /* Literal.String.Symbol */
+.bp { color: #999999 } /* Name.Builtin.Pseudo */
+.vc { color: #ff99ff } /* Name.Variable.Class */
+.vg { color: #ff99ff } /* Name.Variable.Global */
+.vi { color: #ff99ff } /* Name.Variable.Instance */
+.il { color: #009999 } /* Literal.Number.Integer.Long */
\ No newline at end of file
diff --git a/docs/_theme/nature/theme.conf b/docs/_theme/nature/theme.conf
new file mode 100644
index 0000000..1cc4004
--- /dev/null
+++ b/docs/_theme/nature/theme.conf
@@ -0,0 +1,4 @@
+[theme]
+inherit = basic
+stylesheet = nature.css
+pygments_style = tango
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..f7d0230
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+#
+# Setuptools documentation build configuration file, created by
+# sphinx-quickstart on Fri Jul 17 14:22:37 2009.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# The contents of this file are pickled, so don't put values in the namespace
+# that aren't pickleable (module imports are okay, they're removed automatically).
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+
+import subprocess
+import sys
+import os
+
+
+# hack to run the bootstrap script so that jaraco.packaging.sphinx
+# can invoke setup.py
+'READTHEDOCS' in os.environ and subprocess.check_call(
+    [sys.executable, 'bootstrap.py'],
+    cwd=os.path.join(os.path.dirname(__file__), os.path.pardir),
+)
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['jaraco.packaging.sphinx', 'rst.linker', 'sphinx.ext.autosectionlabel']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.txt'
+
+# The master toctree document.
+master_doc = 'index'
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = []
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'nature'
+
+# Add any paths that contain custom themes here, relative to this directory.
+html_theme_path = ['_theme']
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+html_sidebars = {'index': 'indexsidebar.html'}
+
+# If false, no module index is generated.
+html_use_modindex = False
+
+# If false, no index is generated.
+html_use_index = False
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+  ('index', 'Setuptools.tex', 'Setuptools Documentation',
+   'The fellowship of the packaging', 'manual'),
+]
+
+link_files = {
+    '../CHANGES.rst': dict(
+        using=dict(
+            BB='https://bitbucket.org',
+            GH='https://github.com',
+        ),
+        replace=[
+            dict(
+                pattern=r'(Issue )?#(?P<issue>\d+)',
+                url='{package_url}/issues/{issue}',
+            ),
+            dict(
+                pattern=r'BB Pull Request ?#(?P<bb_pull_request>\d+)',
+                url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',
+            ),
+            dict(
+                pattern=r'Distribute #(?P<distribute>\d+)',
+                url='{BB}/tarek/distribute/issue/{distribute}',
+            ),
+            dict(
+                pattern=r'Buildout #(?P<buildout>\d+)',
+                url='{GH}/buildout/buildout/issues/{buildout}',
+            ),
+            dict(
+                pattern=r'Old Setuptools #(?P<old_setuptools>\d+)',
+                url='http://bugs.python.org/setuptools/issue{old_setuptools}',
+            ),
+            dict(
+                pattern=r'Jython #(?P<jython>\d+)',
+                url='http://bugs.jython.org/issue{jython}',
+            ),
+            dict(
+                pattern=r'Python #(?P<python>\d+)',
+                url='http://bugs.python.org/issue{python}',
+            ),
+            dict(
+                pattern=r'Interop #(?P<interop>\d+)',
+                url='{GH}/pypa/interoperability-peps/issues/{interop}',
+            ),
+            dict(
+                pattern=r'Pip #(?P<pip>\d+)',
+                url='{GH}/pypa/pip/issues/{pip}',
+            ),
+            dict(
+                pattern=r'Packaging #(?P<packaging>\d+)',
+                url='{GH}/pypa/packaging/issues/{packaging}',
+            ),
+            dict(
+                pattern=r'[Pp]ackaging (?P<packaging_ver>\d+(\.\d+)+)',
+                url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',
+            ),
+            dict(
+                pattern=r'PEP[- ](?P<pep_number>\d+)',
+                url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',
+            ),
+            dict(
+                pattern=r'setuptools_svn #(?P<setuptools_svn>\d+)',
+                url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',
+            ),
+            dict(
+                pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n',
+                with_scm='{text}\n{rev[timestamp]:%d %b %Y}\n',
+            ),
+        ],
+    ),
+}
diff --git a/docs/developer-guide.txt b/docs/developer-guide.txt
new file mode 100644
index 0000000..b2c1a0c
--- /dev/null
+++ b/docs/developer-guide.txt
@@ -0,0 +1,115 @@
+================================
+Developer's Guide for Setuptools
+================================
+
+If you want to know more about contributing on Setuptools, this is the place.
+
+
+.. contents:: **Table of Contents**
+
+
+-------------------
+Recommended Reading
+-------------------
+
+Please read `How to write the perfect pull request
+<https://blog.jaraco.com/how-to-write-perfect-pull-request/>`_ for some tips
+on contributing to open source projects. Although the article is not
+authoritative, it was authored by the maintainer of Setuptools, so reflects
+his opinions and will improve the likelihood of acceptance and quality of
+contribution.
+
+------------------
+Project Management
+------------------
+
+Setuptools is maintained primarily in Github at `this home
+<https://github.com/pypa/setuptools>`_. Setuptools is maintained under the
+Python Packaging Authority (PyPA) with several core contributors. All bugs
+for Setuptools are filed and the canonical source is maintained in Github.
+
+User support and discussions are done through the issue tracker (for specific
+issues), through the distutils-sig mailing list, or on IRC (Freenode) at
+#pypa.
+
+Discussions about development happen on the pypa-dev mailing list or on
+`Gitter <https://gitter.im/pypa/setuptools>`_.
+
+-----------------
+Authoring Tickets
+-----------------
+
+Before authoring any source code, it's often prudent to file a ticket
+describing the motivation behind making changes. First search to see if a
+ticket already exists for your issue. If not, create one. Try to think from
+the perspective of the reader. Explain what behavior you expected, what you
+got instead, and what factors might have contributed to the unexpected
+behavior. In Github, surround a block of code or traceback with the triple
+backtick "\`\`\`" so that it is formatted nicely.
+
+Filing a ticket provides a forum for justification, discussion, and
+clarification. The ticket provides a record of the purpose for the change and
+any hard decisions that were made. It provides a single place for others to
+reference when trying to understand why the software operates the way it does
+or why certain changes were made.
+
+Setuptools makes extensive use of hyperlinks to tickets in the changelog so
+that system integrators and other users can get a quick summary, but then
+jump to the in-depth discussion about any subject referenced.
+
+-----------
+Source Code
+-----------
+
+Grab the code at Github::
+
+    $ git clone https://github.com/pypa/setuptools
+
+If you want to contribute changes, we recommend you fork the repository on
+Github, commit the changes to your repository, and then make a pull request
+on Github. If you make some changes, don't forget to:
+
+- add a note in CHANGES.rst
+
+Please commit all changes in the 'master' branch against the latest available
+commit or for bug-fixes, against an earlier commit or release in which the
+bug occurred.
+
+If you find yourself working on more than one issue at a time, Setuptools
+generally prefers Git-style branches, so use Mercurial bookmarks or Git
+branches or multiple forks to maintain separate efforts.
+
+The Continuous Integration tests that validate every release are run
+from this repository.
+
+-------
+Testing
+-------
+
+The primary tests are run using tox. To run the tests, first make
+sure you have tox installed, then invoke it::
+
+    $ tox
+
+Under continuous integration, additional tests may be run. See the
+``.travis.yml`` file for full details on the tests run under Travis-CI.
+
+-------------------
+Semantic Versioning
+-------------------
+
+Setuptools follows ``semver``.
+
+.. explain value of reflecting meaning in versions.
+
+----------------------
+Building Documentation
+----------------------
+
+Setuptools relies on the Sphinx system for building documentation.
+To accommodate RTD, docs must be built from the docs/ directory.
+
+To build them, you need to have installed the requirements specified
+in docs/requirements.txt. One way to do this is to use rwt:
+
+    setuptools/docs$ python -m rwt -r requirements.txt -- -m sphinx . html
diff --git a/docs/development.txt b/docs/development.txt
new file mode 100644
index 0000000..455f038
--- /dev/null
+++ b/docs/development.txt
@@ -0,0 +1,35 @@
+-------------------------
+Development on Setuptools
+-------------------------
+
+Setuptools is maintained by the Python community under the Python Packaging
+Authority (PyPA) and led by Jason R. Coombs.
+
+This document describes the process by which Setuptools is developed.
+This document assumes the reader has some passing familiarity with
+*using* setuptools, the ``pkg_resources`` module, and EasyInstall.  It
+does not attempt to explain basic concepts like inter-project
+dependencies, nor does it contain detailed lexical syntax for most
+file formats.  Neither does it explain concepts like "namespace
+packages" or "resources" in any detail, as all of these subjects are
+covered at length in the setuptools developer's guide and the
+``pkg_resources`` reference manual.
+
+Instead, this is **internal** documentation for how those concepts and
+features are *implemented* in concrete terms.  It is intended for people
+who are working on the setuptools code base, who want to be able to
+troubleshoot setuptools problems, want to write code that reads the file
+formats involved, or want to otherwise tinker with setuptools-generated
+files and directories.
+
+Note, however, that these are all internal implementation details and
+are therefore subject to change; stick to the published API if you don't
+want to be responsible for keeping your code from breaking when
+setuptools changes.  You have been warned.
+
+.. toctree::
+   :maxdepth: 1
+
+   developer-guide
+   formats
+   releases
diff --git a/docs/easy_install.txt b/docs/easy_install.txt
new file mode 100644
index 0000000..5c99234
--- /dev/null
+++ b/docs/easy_install.txt
@@ -0,0 +1,1622 @@
+============
+Easy Install
+============
+
+Easy Install is a python module (``easy_install``) bundled with ``setuptools``
+that lets you automatically download, build, install, and manage Python
+packages.
+
+Please share your experiences with us! If you encounter difficulty installing
+a package, please contact us via the `distutils mailing list
+<http://mail.python.org/pipermail/distutils-sig/>`_.  (Note: please DO NOT send
+private email directly to the author of setuptools; it will be discarded.  The
+mailing list is a searchable archive of previously-asked and answered
+questions; you should begin your research there before reporting something as a
+bug -- and then do so via list discussion first.)
+
+(Also, if you'd like to learn about how you can use ``setuptools`` to make your
+own packages work better with EasyInstall, or provide EasyInstall-like features
+without requiring your users to use EasyInstall directly, you'll probably want
+to check out the full `setuptools`_ documentation as well.)
+
+.. contents:: **Table of Contents**
+
+
+Using "Easy Install"
+====================
+
+
+.. _installation instructions:
+
+Installing "Easy Install"
+-------------------------
+
+Please see the `setuptools PyPI page <https://pypi.org/project/setuptools/>`_
+for download links and basic installation instructions for each of the
+supported platforms.
+
+You will need at least Python 3.3 or 2.7.  An ``easy_install`` script will be
+installed in the normal location for Python scripts on your platform.
+
+Note that the instructions on the setuptools PyPI page assume that you
+are installing to Python's primary ``site-packages`` directory.  If this is
+not the case, you should consult the section below on `Custom Installation
+Locations`_ before installing.  (And, on Windows, you should not use the
+``.exe`` installer when installing to an alternate location.)
+
+Note that ``easy_install`` normally works by downloading files from the
+internet.  If you are behind an NTLM-based firewall that prevents Python
+programs from accessing the net directly, you may wish to first install and use
+the `APS proxy server <http://ntlmaps.sf.net/>`_, which lets you get past such
+firewalls in the same way that your web browser(s) do.
+
+(Alternately, if you do not wish easy_install to actually download anything, you
+can restrict it from doing so with the ``--allow-hosts`` option; see the
+sections on `restricting downloads with --allow-hosts`_ and `command-line
+options`_ for more details.)
+
+
+Troubleshooting
+~~~~~~~~~~~~~~~
+
+If EasyInstall/setuptools appears to install correctly, and you can run the
+``easy_install`` command but it fails with an ``ImportError``, the most likely
+cause is that you installed to a location other than ``site-packages``,
+without taking any of the steps described in the `Custom Installation
+Locations`_ section below.  Please see that section and follow the steps to
+make sure that your custom location will work correctly.  Then re-install.
+
+Similarly, if you can run ``easy_install``, and it appears to be installing
+packages, but then you can't import them, the most likely issue is that you
+installed EasyInstall correctly but are using it to install packages to a
+non-standard location that hasn't been properly prepared.  Again, see the
+section on `Custom Installation Locations`_ for more details.
+
+
+Windows Notes
+~~~~~~~~~~~~~
+
+Installing setuptools will provide an ``easy_install`` command according to
+the techniques described in `Executables and Launchers`_. If the
+``easy_install`` command is not available after installation, that section
+provides details on how to configure Windows to make the commands available.
+
+
+Downloading and Installing a Package
+------------------------------------
+
+For basic use of ``easy_install``, you need only supply the filename or URL of
+a source distribution or .egg file (`Python Egg`__).
+
+__ http://peak.telecommunity.com/DevCenter/PythonEggs
+
+**Example 1**. Install a package by name, searching PyPI for the latest
+version, and automatically downloading, building, and installing it::
+
+    easy_install SQLObject
+
+**Example 2**. Install or upgrade a package by name and version by finding
+links on a given "download page"::
+
+    easy_install -f http://pythonpaste.org/package_index.html SQLObject
+
+**Example 3**. Download a source distribution from a specified URL,
+automatically building and installing it::
+
+    easy_install http://example.com/path/to/MyPackage-1.2.3.tgz
+
+**Example 4**. Install an already-downloaded .egg file::
+
+    easy_install /my_downloads/OtherPackage-3.2.1-py2.3.egg
+
+**Example 5**.  Upgrade an already-installed package to the latest version
+listed on PyPI::
+
+    easy_install --upgrade PyProtocols
+
+**Example 6**.  Install a source distribution that's already downloaded and
+extracted in the current directory (New in 0.5a9)::
+
+    easy_install .
+
+**Example 7**.  (New in 0.6a1) Find a source distribution or Subversion
+checkout URL for a package, and extract it or check it out to
+``~/projects/sqlobject`` (the name will always be in all-lowercase), where it
+can be examined or edited.  (The package will not be installed, but it can
+easily be installed with ``easy_install ~/projects/sqlobject``.  See `Editing
+and Viewing Source Packages`_ below for more info.)::
+
+    easy_install --editable --build-directory ~/projects SQLObject
+
+**Example 8**. (New in 0.6.11) Install a distribution within your home dir::
+
+    easy_install --user SQLAlchemy
+
+Easy Install accepts URLs, filenames, PyPI package names (i.e., ``distutils``
+"distribution" names), and package+version specifiers.  In each case, it will
+attempt to locate the latest available version that meets your criteria.
+
+When downloading or processing downloaded files, Easy Install recognizes
+distutils source distribution files with extensions of .tgz, .tar, .tar.gz,
+.tar.bz2, or .zip.  And of course it handles already-built .egg
+distributions as well as ``.win32.exe`` installers built using distutils.
+
+By default, packages are installed to the running Python installation's
+``site-packages`` directory, unless you provide the ``-d`` or ``--install-dir``
+option to specify an alternative directory, or specify an alternate location
+using distutils configuration files.  (See `Configuration Files`_, below.)
+
+By default, any scripts included with the package are installed to the running
+Python installation's standard script installation location.  However, if you
+specify an installation directory via the command line or a config file, then
+the default directory for installing scripts will be the same as the package
+installation directory, to ensure that the script will have access to the
+installed package.  You can override this using the ``-s`` or ``--script-dir``
+option.
+
+Installed packages are added to an ``easy-install.pth`` file in the install
+directory, so that Python will always use the most-recently-installed version
+of the package.  If you would like to be able to select which version to use at
+runtime, you should use the ``-m`` or ``--multi-version`` option.
+
+
+Upgrading a Package
+-------------------
+
+You don't need to do anything special to upgrade a package: just install the
+new version, either by requesting a specific version, e.g.::
+
+    easy_install "SomePackage==2.0"
+
+a version greater than the one you have now::
+
+    easy_install "SomePackage>2.0"
+
+using the upgrade flag, to find the latest available version on PyPI::
+
+    easy_install --upgrade SomePackage
+
+or by using a download page, direct download URL, or package filename::
+
+    easy_install -f http://example.com/downloads ExamplePackage
+
+    easy_install http://example.com/downloads/ExamplePackage-2.0-py2.4.egg
+
+    easy_install my_downloads/ExamplePackage-2.0.tgz
+
+If you're using ``-m`` or ``--multi-version`` , using the ``require()``
+function at runtime automatically selects the newest installed version of a
+package that meets your version criteria.  So, installing a newer version is
+the only step needed to upgrade such packages.
+
+If you're installing to a directory on PYTHONPATH, or a configured "site"
+directory (and not using ``-m``), installing a package automatically replaces
+any previous version in the ``easy-install.pth`` file, so that Python will
+import the most-recently installed version by default.  So, again, installing
+the newer version is the only upgrade step needed.
+
+If you haven't suppressed script installation (using ``--exclude-scripts`` or
+``-x``), then the upgraded version's scripts will be installed, and they will
+be automatically patched to ``require()`` the corresponding version of the
+package, so that you can use them even if they are installed in multi-version
+mode.
+
+``easy_install`` never actually deletes packages (unless you're installing a
+package with the same name and version number as an existing package), so if
+you want to get rid of older versions of a package, please see `Uninstalling
+Packages`_, below.
+
+
+Changing the Active Version
+---------------------------
+
+If you've upgraded a package, but need to revert to a previously-installed
+version, you can do so like this::
+
+    easy_install PackageName==1.2.3
+
+Where ``1.2.3`` is replaced by the exact version number you wish to switch to.
+If a package matching the requested name and version is not already installed
+in a directory on ``sys.path``, it will be located via PyPI and installed.
+
+If you'd like to switch to the latest installed version of ``PackageName``, you
+can do so like this::
+
+    easy_install PackageName
+
+This will activate the latest installed version.  (Note: if you have set any
+``find_links`` via distutils configuration files, those download pages will be
+checked for the latest available version of the package, and it will be
+downloaded and installed if it is newer than your current version.)
+
+Note that changing the active version of a package will install the newly
+active version's scripts, unless the ``--exclude-scripts`` or ``-x`` option is
+specified.
+
+
+Uninstalling Packages
+---------------------
+
+If you have replaced a package with another version, then you can just delete
+the package(s) you don't need by deleting the PackageName-versioninfo.egg file
+or directory (found in the installation directory).
+
+If you want to delete the currently installed version of a package (or all
+versions of a package), you should first run::
+
+    easy_install -m PackageName
+
+This will ensure that Python doesn't continue to search for a package you're
+planning to remove. After you've done this, you can safely delete the .egg
+files or directories, along with any scripts you wish to remove.
+
+
+Managing Scripts
+----------------
+
+Whenever you install, upgrade, or change versions of a package, EasyInstall
+automatically installs the scripts for the selected package version, unless
+you tell it not to with ``-x`` or ``--exclude-scripts``.  If any scripts in
+the script directory have the same name, they are overwritten.
+
+Thus, you do not normally need to manually delete scripts for older versions of
+a package, unless the newer version of the package does not include a script
+of the same name.  However, if you are completely uninstalling a package, you
+may wish to manually delete its scripts.
+
+EasyInstall's default behavior means that you can normally only run scripts
+from one version of a package at a time.  If you want to keep multiple versions
+of a script available, however, you can simply use the ``--multi-version`` or
+``-m`` option, and rename the scripts that EasyInstall creates.  This works
+because EasyInstall installs scripts as short code stubs that ``require()`` the
+matching version of the package the script came from, so renaming the script
+has no effect on what it executes.
+
+For example, suppose you want to use two versions of the ``rst2html`` tool
+provided by the `docutils <http://docutils.sf.net/>`_ package.  You might
+first install one version::
+
+    easy_install -m docutils==0.3.9
+
+then rename the ``rst2html.py`` to ``r2h_039``, and install another version::
+
+    easy_install -m docutils==0.3.10
+
+This will create another ``rst2html.py`` script, this one using docutils
+version 0.3.10 instead of 0.3.9.  You now have two scripts, each using a
+different version of the package.  (Notice that we used ``-m`` for both
+installations, so that Python won't lock us out of using anything but the most
+recently-installed version of the package.)
+
+
+Executables and Launchers
+-------------------------
+
+On Unix systems, scripts are installed as natural files with a "#!"
+header and no extension and they launch under the Python version indicated in
+the header.
+
+On Windows, there is no mechanism to "execute" files without extensions, so
+EasyInstall provides two techniques to mirror the Unix behavior. The behavior
+is indicated by the SETUPTOOLS_LAUNCHER environment variable, which may be
+"executable" (default) or "natural".
+
+Regardless of the technique used, the script(s) will be installed to a Scripts
+directory (by default in the Python installation directory). It is recommended
+for EasyInstall that you ensure this directory is in the PATH environment
+variable. The easiest way to ensure the Scripts directory is in the PATH is
+to run ``Tools\Scripts\win_add2path.py`` from the Python directory.
+
+Note that instead of changing your ``PATH`` to include the Python scripts
+directory, you can also retarget the installation location for scripts so they
+go on a directory that's already on the ``PATH``.  For more information see
+`Command-Line Options`_ and `Configuration Files`_.  During installation,
+pass command line options (such as ``--script-dir``) to
+``ez_setup.py`` to control where ``easy_install.exe`` will be installed.
+
+
+Windows Executable Launcher
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If the "executable" launcher is used, EasyInstall will create a '.exe'
+launcher of the same name beside each installed script (including
+``easy_install`` itself). These small .exe files launch the script of the
+same name using the Python version indicated in the '#!' header.
+
+This behavior is currently default. To force
+the use of executable launchers, set ``SETUPTOOLS_LAUNCHER`` to "executable".
+
+Natural Script Launcher
+~~~~~~~~~~~~~~~~~~~~~~~
+
+EasyInstall also supports deferring to an external launcher such as
+`pylauncher <https://bitbucket.org/pypa/pylauncher>`_ for launching scripts.
+Enable this experimental functionality by setting the
+``SETUPTOOLS_LAUNCHER`` environment variable to "natural". EasyInstall will
+then install scripts as simple
+scripts with a .pya (or .pyw) extension appended. If these extensions are
+associated with the pylauncher and listed in the PATHEXT environment variable,
+these scripts can then be invoked simply and directly just like any other
+executable. This behavior may become default in a future version.
+
+EasyInstall uses the .pya extension instead of simply
+the typical '.py' extension. This distinct extension is necessary to prevent
+Python
+from treating the scripts as importable modules (where name conflicts exist).
+Current releases of pylauncher do not yet associate with .pya files by
+default, but future versions should do so.
+
+
+Tips & Techniques
+-----------------
+
+Multiple Python Versions
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+EasyInstall installs itself under two names:
+``easy_install`` and ``easy_install-N.N``, where ``N.N`` is the Python version
+used to install it.  Thus, if you install EasyInstall for both Python 3.2 and
+2.7, you can use the ``easy_install-3.2`` or ``easy_install-2.7`` scripts to
+install packages for the respective Python version.
+
+Setuptools also supplies easy_install as a runnable module which may be
+invoked using ``python -m easy_install`` for any Python with Setuptools
+installed.
+
+Restricting Downloads with ``--allow-hosts``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can use the ``--allow-hosts`` (``-H``) option to restrict what domains
+EasyInstall will look for links and downloads on.  ``--allow-hosts=None``
+prevents downloading altogether.  You can also use wildcards, for example
+to restrict downloading to hosts in your own intranet.  See the section below
+on `Command-Line Options`_ for more details on the ``--allow-hosts`` option.
+
+By default, there are no host restrictions in effect, but you can change this
+default by editing the appropriate `configuration files`_ and adding:
+
+.. code-block:: ini
+
+    [easy_install]
+    allow_hosts = *.myintranet.example.com,*.python.org
+
+The above example would then allow downloads only from hosts in the
+``python.org`` and ``myintranet.example.com`` domains, unless overridden on the
+command line.
+
+
+Installing on Un-networked Machines
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Just copy the eggs or source packages you need to a directory on the target
+machine, then use the ``-f`` or ``--find-links`` option to specify that
+directory's location.  For example::
+
+    easy_install -H None -f somedir SomePackage
+
+will attempt to install SomePackage using only eggs and source packages found
+in ``somedir`` and disallowing all remote access.  You should of course make
+sure you have all of SomePackage's dependencies available in somedir.
+
+If you have another machine of the same operating system and library versions
+(or if the packages aren't platform-specific), you can create the directory of
+eggs using a command like this::
+
+    easy_install -zmaxd somedir SomePackage
+
+This will tell EasyInstall to put zipped eggs or source packages for
+SomePackage and all its dependencies into ``somedir``, without creating any
+scripts or .pth files.  You can then copy the contents of ``somedir`` to the
+target machine.  (``-z`` means zipped eggs, ``-m`` means multi-version, which
+prevents .pth files from being used, ``-a`` means to copy all the eggs needed,
+even if they're installed elsewhere on the machine, and ``-d`` indicates the
+directory to place the eggs in.)
+
+You can also build the eggs from local development packages that were installed
+with the ``setup.py develop`` command, by including the ``-l`` option, e.g.::
+
+    easy_install -zmaxld somedir SomePackage
+
+This will use locally-available source distributions to build the eggs.
+
+
+Packaging Others' Projects As Eggs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Need to distribute a package that isn't published in egg form?  You can use
+EasyInstall to build eggs for a project.  You'll want to use the ``--zip-ok``,
+``--exclude-scripts``, and possibly ``--no-deps`` options (``-z``, ``-x`` and
+``-N``, respectively).  Use ``-d`` or ``--install-dir`` to specify the location
+where you'd like the eggs placed.  By placing them in a directory that is
+published to the web, you can then make the eggs available for download, either
+in an intranet or to the internet at large.
+
+If someone distributes a package in the form of a single ``.py`` file, you can
+wrap it in an egg by tacking an ``#egg=name-version`` suffix on the file's URL.
+So, something like this::
+
+    easy_install -f "http://some.example.com/downloads/foo.py#egg=foo-1.0" foo
+
+will install the package as an egg, and this::
+
+    easy_install -zmaxd. \
+        -f "http://some.example.com/downloads/foo.py#egg=foo-1.0" foo
+
+will create a ``.egg`` file in the current directory.
+
+
+Creating your own Package Index
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In addition to local directories and the Python Package Index, EasyInstall can
+find download links on most any web page whose URL is given to the ``-f``
+(``--find-links``) option.  In the simplest case, you can simply have a web
+page with links to eggs or Python source packages, even an automatically
+generated directory listing (such as the Apache web server provides).
+
+If you are setting up an intranet site for package downloads, you may want to
+configure the target machines to use your download site by default, adding
+something like this to their `configuration files`_:
+
+.. code-block:: ini
+
+    [easy_install]
+    find_links = http://mypackages.example.com/somedir/
+                 http://turbogears.org/download/
+                 http://peak.telecommunity.com/dist/
+
+As you can see, you can list multiple URLs separated by whitespace, continuing
+on multiple lines if necessary (as long as the subsequent lines are indented).
+
+If you are more ambitious, you can also create an entirely custom package index
+or PyPI mirror.  See the ``--index-url`` option under `Command-Line Options`_,
+below, and also the section on `Package Index "API"`_.
+
+
+Password-Protected Sites
+------------------------
+
+If a site you want to download from is password-protected using HTTP "Basic"
+authentication, you can specify your credentials in the URL, like so::
+
+    http://some_userid:some_password@some.example.com/some_path/
+
+You can do this with both index page URLs and direct download URLs.  As long
+as any HTML pages read by easy_install use *relative* links to point to the
+downloads, the same user ID and password will be used to do the downloading.
+
+Using .pypirc Credentials
+-------------------------
+
+In additional to supplying credentials in the URL, ``easy_install`` will also
+honor credentials if present in the .pypirc file. Teams maintaining a private
+repository of packages may already have defined access credentials for
+uploading packages according to the distutils documentation. ``easy_install``
+will attempt to honor those if present. Refer to the distutils documentation
+for Python 2.5 or later for details on the syntax.
+
+Controlling Build Options
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+EasyInstall respects standard distutils `Configuration Files`_, so you can use
+them to configure build options for packages that it installs from source.  For
+example, if you are on Windows using the MinGW compiler, you can configure the
+default compiler by putting something like this:
+
+.. code-block:: ini
+
+    [build]
+    compiler = mingw32
+
+into the appropriate distutils configuration file.  In fact, since this is just
+normal distutils configuration, it will affect any builds using that config
+file, not just ones done by EasyInstall.  For example, if you add those lines
+to ``distutils.cfg`` in the ``distutils`` package directory, it will be the
+default compiler for *all* packages you build.  See `Configuration Files`_
+below for a list of the standard configuration file locations, and links to
+more documentation on using distutils configuration files.
+
+
+Editing and Viewing Source Packages
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sometimes a package's source distribution contains additional documentation,
+examples, configuration files, etc., that are not part of its actual code.  If
+you want to be able to examine these files, you can use the ``--editable``
+option to EasyInstall, and EasyInstall will look for a source distribution
+or Subversion URL for the package, then download and extract it or check it out
+as a subdirectory of the ``--build-directory`` you specify.  If you then wish
+to install the package after editing or configuring it, you can do so by
+rerunning EasyInstall with that directory as the target.
+
+Note that using ``--editable`` stops EasyInstall from actually building or
+installing the package; it just finds, obtains, and possibly unpacks it for
+you.  This allows you to make changes to the package if necessary, and to
+either install it in development mode using ``setup.py develop`` (if the
+package uses setuptools, that is), or by running ``easy_install projectdir``
+(where ``projectdir`` is the subdirectory EasyInstall created for the
+downloaded package).
+
+In order to use ``--editable`` (``-e`` for short), you *must* also supply a
+``--build-directory`` (``-b`` for short).  The project will be placed in a
+subdirectory of the build directory.  The subdirectory will have the same
+name as the project itself, but in all-lowercase.  If a file or directory of
+that name already exists, EasyInstall will print an error message and exit.
+
+Also, when using ``--editable``, you cannot use URLs or filenames as arguments.
+You *must* specify project names (and optional version requirements) so that
+EasyInstall knows what directory name(s) to create.  If you need to force
+EasyInstall to use a particular URL or filename, you should specify it as a
+``--find-links`` item (``-f`` for short), and then also specify
+the project name, e.g.::
+
+    easy_install -eb ~/projects \
+     -fhttp://prdownloads.sourceforge.net/ctypes/ctypes-0.9.6.tar.gz?download \
+     ctypes==0.9.6
+
+
+Dealing with Installation Conflicts
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+(NOTE: As of 0.6a11, this section is obsolete; it is retained here only so that
+people using older versions of EasyInstall can consult it.  As of version
+0.6a11, installation conflicts are handled automatically without deleting the
+old or system-installed packages, and without ignoring the issue.  Instead,
+eggs are automatically shifted to the front of ``sys.path`` using special
+code added to the ``easy-install.pth`` file.  So, if you are using version
+0.6a11 or better of setuptools, you do not need to worry about conflicts,
+and the following issues do not apply to you.)
+
+EasyInstall installs distributions in a "managed" way, such that each
+distribution can be independently activated or deactivated on ``sys.path``.
+However, packages that were not installed by EasyInstall are "unmanaged",
+in that they usually live all in one directory and cannot be independently
+activated or deactivated.
+
+As a result, if you are using EasyInstall to upgrade an existing package, or
+to install a package with the same name as an existing package, EasyInstall
+will warn you of the conflict.  (This is an improvement over ``setup.py
+install``, because the ``distutils`` just install new packages on top of old
+ones, possibly combining two unrelated packages or leaving behind modules that
+have been deleted in the newer version of the package.)
+
+EasyInstall will stop the installation if it detects a conflict
+between an existing, "unmanaged" package, and a module or package in any of
+the distributions you're installing.  It will display a list of all of the
+existing files and directories that would need to be deleted for the new
+package to be able to function correctly.  To proceed, you must manually
+delete these conflicting files and directories and re-run EasyInstall.
+
+Of course, once you've replaced all of your existing "unmanaged" packages with
+versions managed by EasyInstall, you won't have any more conflicts to worry
+about!
+
+
+Compressed Installation
+~~~~~~~~~~~~~~~~~~~~~~~
+
+EasyInstall tries to install packages in zipped form, if it can.  Zipping
+packages can improve Python's overall import performance if you're not using
+the ``--multi-version`` option, because Python processes zipfile entries on
+``sys.path`` much faster than it does directories.
+
+As of version 0.5a9, EasyInstall analyzes packages to determine whether they
+can be safely installed as a zipfile, and then acts on its analysis.  (Previous
+versions would not install a package as a zipfile unless you used the
+``--zip-ok`` option.)
+
+The current analysis approach is fairly conservative; it currently looks for:
+
+ * Any use of the ``__file__`` or ``__path__`` variables (which should be
+   replaced with ``pkg_resources`` API calls)
+
+ * Possible use of ``inspect`` functions that expect to manipulate source files
+   (e.g. ``inspect.getsource()``)
+
+ * Top-level modules that might be scripts used with ``python -m`` (Python 2.4)
+
+If any of the above are found in the package being installed, EasyInstall will
+assume that the package cannot be safely run from a zipfile, and unzip it to
+a directory instead.  You can override this analysis with the ``--zip-ok`` flag,
+which will tell EasyInstall to install the package as a zipfile anyway.  Or,
+you can use the ``--always-unzip`` flag, in which case EasyInstall will always
+unzip, even if its analysis says the package is safe to run as a zipfile.
+
+Normally, however, it is simplest to let EasyInstall handle the determination
+of whether to zip or unzip, and only specify overrides when needed to work
+around a problem.  If you find you need to override EasyInstall's guesses, you
+may want to contact the package author and the EasyInstall maintainers, so that
+they can make appropriate changes in future versions.
+
+(Note: If a package uses ``setuptools`` in its setup script, the package author
+has the option to declare the package safe or unsafe for zipped usage via the
+``zip_safe`` argument to ``setup()``.  If the package author makes such a
+declaration, EasyInstall believes the package's author and does not perform its
+own analysis.  However, your command-line option, if any, will still override
+the package author's choice.)
+
+
+Reference Manual
+================
+
+Configuration Files
+-------------------
+
+(New in 0.4a2)
+
+You may specify default options for EasyInstall using the standard
+distutils configuration files, under the command heading ``easy_install``.
+EasyInstall will look first for a ``setup.cfg`` file in the current directory,
+then a ``~/.pydistutils.cfg`` or ``$HOME\\pydistutils.cfg`` (on Unix-like OSes
+and Windows, respectively), and finally a ``distutils.cfg`` file in the
+``distutils`` package directory.  Here's a simple example:
+
+.. code-block:: ini
+
+    [easy_install]
+
+    # set the default location to install packages
+    install_dir = /home/me/lib/python
+
+    # Notice that indentation can be used to continue an option
+    # value; this is especially useful for the "--find-links"
+    # option, which tells easy_install to use download links on
+    # these pages before consulting PyPI:
+    #
+    find_links = http://sqlobject.org/
+                 http://peak.telecommunity.com/dist/
+
+In addition to accepting configuration for its own options under
+``[easy_install]``, EasyInstall also respects defaults specified for other
+distutils commands.  For example, if you don't set an ``install_dir`` for
+``[easy_install]``, but *have* set an ``install_lib`` for the ``[install]``
+command, this will become EasyInstall's default installation directory.  Thus,
+if you are already using distutils configuration files to set default install
+locations, build options, etc., EasyInstall will respect your existing settings
+until and unless you override them explicitly in an ``[easy_install]`` section.
+
+For more information, see also the current Python documentation on the `use and
+location of distutils configuration files <https://docs.python.org/install/index.html#inst-config-files>`_.
+
+Notice that ``easy_install`` will use the ``setup.cfg`` from the current
+working directory only if it was triggered from ``setup.py`` through the
+``install_requires`` option. The standalone command will not use that file.
+
+Command-Line Options
+--------------------
+
+``--zip-ok, -z``
+    Install all packages as zip files, even if they are marked as unsafe for
+    running as a zipfile.  This can be useful when EasyInstall's analysis
+    of a non-setuptools package is too conservative, but keep in mind that
+    the package may not work correctly.  (Changed in 0.5a9; previously this
+    option was required in order for zipped installation to happen at all.)
+
+``--always-unzip, -Z``
+    Don't install any packages as zip files, even if the packages are marked
+    as safe for running as a zipfile.  This can be useful if a package does
+    something unsafe, but not in a way that EasyInstall can easily detect.
+    EasyInstall's default analysis is currently very conservative, however, so
+    you should only use this option if you've had problems with a particular
+    package, and *after* reporting the problem to the package's maintainer and
+    to the EasyInstall maintainers.
+
+    (Note: the ``-z/-Z`` options only affect the installation of newly-built
+    or downloaded packages that are not already installed in the target
+    directory; if you want to convert an existing installed version from
+    zipped to unzipped or vice versa, you'll need to delete the existing
+    version first, and re-run EasyInstall.)
+
+``--multi-version, -m``
+    "Multi-version" mode. Specifying this option prevents ``easy_install`` from
+    adding an ``easy-install.pth`` entry for the package being installed, and
+    if an entry for any version of the package already exists, it will be removed
+    upon successful installation. In multi-version mode, no specific version of
+    the package is available for importing, unless you use
+    ``pkg_resources.require()`` to put it on ``sys.path``. This can be as
+    simple as::
+
+        from pkg_resources import require
+        require("SomePackage", "OtherPackage", "MyPackage")
+
+    which will put the latest installed version of the specified packages on
+    ``sys.path`` for you. (For more advanced uses, like selecting specific
+    versions and enabling optional dependencies, see the ``pkg_resources`` API
+    doc.)
+
+    Changed in 0.6a10: this option is no longer silently enabled when
+    installing to a non-PYTHONPATH, non-"site" directory.  You must always
+    explicitly use this option if you want it to be active.
+
+``--upgrade, -U``   (New in 0.5a4)
+    By default, EasyInstall only searches online if a project/version
+    requirement can't be met by distributions already installed
+    on sys.path or the installation directory.  However, if you supply the
+    ``--upgrade`` or ``-U`` flag, EasyInstall will always check the package
+    index and ``--find-links`` URLs before selecting a version to install.  In
+    this way, you can force EasyInstall to use the latest available version of
+    any package it installs (subject to any version requirements that might
+    exclude such later versions).
+
+``--install-dir=DIR, -d DIR``
+    Set the installation directory. It is up to you to ensure that this
+    directory is on ``sys.path`` at runtime, and to use
+    ``pkg_resources.require()`` to enable the installed package(s) that you
+    need.
+
+    (New in 0.4a2) If this option is not directly specified on the command line
+    or in a distutils configuration file, the distutils default installation
+    location is used.  Normally, this would be the ``site-packages`` directory,
+    but if you are using distutils configuration files, setting things like
+    ``prefix`` or ``install_lib``, then those settings are taken into
+    account when computing the default installation directory, as is the
+    ``--prefix`` option.
+
+``--script-dir=DIR, -s DIR``
+    Set the script installation directory.  If you don't supply this option
+    (via the command line or a configuration file), but you *have* supplied
+    an ``--install-dir`` (via command line or config file), then this option
+    defaults to the same directory, so that the scripts will be able to find
+    their associated package installation.  Otherwise, this setting defaults
+    to the location where the distutils would normally install scripts, taking
+    any distutils configuration file settings into account.
+
+``--exclude-scripts, -x``
+    Don't install scripts.  This is useful if you need to install multiple
+    versions of a package, but do not want to reset the version that will be
+    run by scripts that are already installed.
+
+``--user`` (New in 0.6.11)
+    Use the user-site-packages as specified in :pep:`370`
+    instead of the global site-packages.
+
+``--always-copy, -a``   (New in 0.5a4)
+    Copy all needed distributions to the installation directory, even if they
+    are already present in a directory on sys.path.  In older versions of
+    EasyInstall, this was the default behavior, but now you must explicitly
+    request it.  By default, EasyInstall will no longer copy such distributions
+    from other sys.path directories to the installation directory, unless you
+    explicitly gave the distribution's filename on the command line.
+
+    Note that as of 0.6a10, using this option excludes "system" and
+    "development" eggs from consideration because they can't be reliably
+    copied.  This may cause EasyInstall to choose an older version of a package
+    than what you expected, or it may cause downloading and installation of a
+    fresh copy of something that's already installed.  You will see warning
+    messages for any eggs that EasyInstall skips, before it falls back to an
+    older version or attempts to download a fresh copy.
+
+``--find-links=URLS_OR_FILENAMES, -f URLS_OR_FILENAMES``
+    Scan the specified "download pages" or directories for direct links to eggs
+    or other distributions.  Any existing file or directory names or direct
+    download URLs are immediately added to EasyInstall's search cache, and any
+    indirect URLs (ones that don't point to eggs or other recognized archive
+    formats) are added to a list of additional places to search for download
+    links.  As soon as EasyInstall has to go online to find a package (either
+    because it doesn't exist locally, or because ``--upgrade`` or ``-U`` was
+    used), the specified URLs will be downloaded and scanned for additional
+    direct links.
+
+    Eggs and archives found by way of ``--find-links`` are only downloaded if
+    they are needed to meet a requirement specified on the command line; links
+    to unneeded packages are ignored.
+
+    If all requested packages can be found using links on the specified
+    download pages, the Python Package Index will not be consulted unless you
+    also specified the ``--upgrade`` or ``-U`` option.
+
+    (Note: if you want to refer to a local HTML file containing links, you must
+    use a ``file:`` URL, as filenames that do not refer to a directory, egg, or
+    archive are ignored.)
+
+    You may specify multiple URLs or file/directory names with this option,
+    separated by whitespace.  Note that on the command line, you will probably
+    have to surround the URL list with quotes, so that it is recognized as a
+    single option value.  You can also specify URLs in a configuration file;
+    see `Configuration Files`_, above.
+
+    Changed in 0.6a10: previously all URLs and directories passed to this
+    option were scanned as early as possible, but from 0.6a10 on, only
+    directories and direct archive links are scanned immediately; URLs are not
+    retrieved unless a package search was already going to go online due to a
+    package not being available locally, or due to the use of the ``--upgrade``
+    or ``-U`` option.
+
+``--no-find-links`` Blocks the addition of any link.
+    This parameter is useful if you want to avoid adding links defined in a
+    project easy_install is installing (whether it's a requested project or a
+    dependency). When used, ``--find-links`` is ignored.
+
+    Added in Distribute 0.6.11 and Setuptools 0.7.
+
+``--index-url=URL, -i URL`` (New in 0.4a1; default changed in 0.6c7)
+    Specifies the base URL of the Python Package Index.  The default is
+    https://pypi.org/simple/ if not specified.  When a package is requested
+    that is not locally available or linked from a ``--find-links`` download
+    page, the package index will be searched for download pages for the needed
+    package, and those download pages will be searched for links to download
+    an egg or source distribution.
+
+``--editable, -e`` (New in 0.6a1)
+    Only find and download source distributions for the specified projects,
+    unpacking them to subdirectories of the specified ``--build-directory``.
+    EasyInstall will not actually build or install the requested projects or
+    their dependencies; it will just find and extract them for you.  See
+    `Editing and Viewing Source Packages`_ above for more details.
+
+``--build-directory=DIR, -b DIR`` (UPDATED in 0.6a1)
+    Set the directory used to build source packages.  If a package is built
+    from a source distribution or checkout, it will be extracted to a
+    subdirectory of the specified directory.  The subdirectory will have the
+    same name as the extracted distribution's project, but in all-lowercase.
+    If a file or directory of that name already exists in the given directory,
+    a warning will be printed to the console, and the build will take place in
+    a temporary directory instead.
+
+    This option is most useful in combination with the ``--editable`` option,
+    which forces EasyInstall to *only* find and extract (but not build and
+    install) source distributions.  See `Editing and Viewing Source Packages`_,
+    above, for more information.
+
+``--verbose, -v, --quiet, -q`` (New in 0.4a4)
+    Control the level of detail of EasyInstall's progress messages.  The
+    default detail level is "info", which prints information only about
+    relatively time-consuming operations like running a setup script, unpacking
+    an archive, or retrieving a URL.  Using ``-q`` or ``--quiet`` drops the
+    detail level to "warn", which will only display installation reports,
+    warnings, and errors.  Using ``-v`` or ``--verbose`` increases the detail
+    level to include individual file-level operations, link analysis messages,
+    and distutils messages from any setup scripts that get run.  If you include
+    the ``-v`` option more than once, the second and subsequent uses are passed
+    down to any setup scripts, increasing the verbosity of their reporting as
+    well.
+
+``--dry-run, -n`` (New in 0.4a4)
+    Don't actually install the package or scripts.  This option is passed down
+    to any setup scripts run, so packages should not actually build either.
+    This does *not* skip downloading, nor does it skip extracting source
+    distributions to a temporary/build directory.
+
+``--optimize=LEVEL``, ``-O LEVEL`` (New in 0.4a4)
+    If you are installing from a source distribution, and are *not* using the
+    ``--zip-ok`` option, this option controls the optimization level for
+    compiling installed ``.py`` files to ``.pyo`` files.  It does not affect
+    the compilation of modules contained in ``.egg`` files, only those in
+    ``.egg`` directories.  The optimization level can be set to 0, 1, or 2;
+    the default is 0 (unless it's set under ``install`` or ``install_lib`` in
+    one of your distutils configuration files).
+
+``--record=FILENAME``  (New in 0.5a4)
+    Write a record of all installed files to FILENAME.  This is basically the
+    same as the same option for the standard distutils "install" command, and
+    is included for compatibility with tools that expect to pass this option
+    to "setup.py install".
+
+``--site-dirs=DIRLIST, -S DIRLIST``   (New in 0.6a1)
+    Specify one or more custom "site" directories (separated by commas).
+    "Site" directories are directories where ``.pth`` files are processed, such
+    as the main Python ``site-packages`` directory.  As of 0.6a10, EasyInstall
+    automatically detects whether a given directory processes ``.pth`` files
+    (or can be made to do so), so you should not normally need to use this
+    option.  It is now only necessary if you want to override EasyInstall's
+    judgment and force an installation directory to be treated as if it
+    supported ``.pth`` files.
+
+``--no-deps, -N``  (New in 0.6a6)
+    Don't install any dependencies.  This is intended as a convenience for
+    tools that wrap eggs in a platform-specific packaging system.  (We don't
+    recommend that you use it for anything else.)
+
+``--allow-hosts=PATTERNS, -H PATTERNS``   (New in 0.6a6)
+    Restrict downloading and spidering to hosts matching the specified glob
+    patterns.  E.g. ``-H *.python.org`` restricts web access so that only
+    packages listed on, and downloadable from, machines in the ``python.org``
+    domain can be used.  The glob patterns must match the *entire*
+    user/host/port section of the target URL(s).  For example,
+    ``*.python.org`` will NOT accept a URL
+    like ``http://python.org/foo`` or ``http://www.python.org:8080/``.
+    Multiple patterns can be specified by separating them with commas.  The
+    default pattern is ``*``, which matches anything.
+
+    In general, this option is mainly useful for blocking EasyInstall's web
+    access altogether (e.g. ``-Hlocalhost``), or to restrict it to an intranet
+    or other trusted site.  EasyInstall will do the best it can to satisfy
+    dependencies given your host restrictions, but of course can fail if it
+    can't find suitable packages.  EasyInstall displays all blocked URLs, so
+    that you can adjust your ``--allow-hosts`` setting if it is more strict
+    than you intended.  Some sites may wish to define a restrictive default
+    setting for this option in their `configuration files`_, and then manually
+    override the setting on the command line as needed.
+
+``--prefix=DIR`` (New in 0.6a10)
+    Use the specified directory as a base for computing the default
+    installation and script directories.  On Windows, the resulting default
+    directories will be ``prefix\\Lib\\site-packages`` and ``prefix\\Scripts``,
+    while on other platforms the defaults will be
+    ``prefix/lib/python2.X/site-packages`` (with the appropriate version
+    substituted) for libraries and ``prefix/bin`` for scripts.
+
+    Note that the ``--prefix`` option only sets the *default* installation and
+    script directories, and does not override the ones set on the command line
+    or in a configuration file.
+
+``--local-snapshots-ok, -l`` (New in 0.6c6)
+    Normally, EasyInstall prefers to only install *released* versions of
+    projects, not in-development ones, because such projects may not
+    have a currently-valid version number.  So, it usually only installs them
+    when their ``setup.py`` directory is explicitly passed on the command line.
+
+    However, if this option is used, then any in-development projects that were
+    installed using the ``setup.py develop`` command, will be used to build
+    eggs, effectively upgrading the "in-development" project to a snapshot
+    release.  Normally, this option is used only in conjunction with the
+    ``--always-copy`` option to create a distributable snapshot of every egg
+    needed to run an application.
+
+    Note that if you use this option, you must make sure that there is a valid
+    version number (such as an SVN revision number tag) for any in-development
+    projects that may be used, as otherwise EasyInstall may not be able to tell
+    what version of the project is "newer" when future installations or
+    upgrades are attempted.
+
+
+.. _non-root installation:
+
+Custom Installation Locations
+-----------------------------
+
+By default, EasyInstall installs python packages into Python's main ``site-packages`` directory,
+and manages them using a custom ``.pth`` file in that same directory.
+
+Very often though, a user or developer wants ``easy_install`` to install and manage python packages
+in an alternative location, usually for one of 3 reasons:
+
+1. They don't have access to write to the main Python site-packages directory.
+
+2. They want a user-specific stash of packages that is not visible to other users.
+
+3. They want to isolate a set of packages to a specific python application, usually to minimize
+   the possibility of version conflicts.
+
+Historically, there have been many approaches to achieve custom installation.
+The following section lists only the easiest and most relevant approaches [1]_.
+
+`Use the "--user" option`_
+
+`Use the "--user" option and customize "PYTHONUSERBASE"`_
+
+`Use "virtualenv"`_
+
+.. [1] There are older ways to achieve custom installation using various ``easy_install`` and ``setup.py install`` options, combined with ``PYTHONPATH`` and/or ``PYTHONUSERBASE`` alterations, but all of these are effectively deprecated by the User scheme brought in by `PEP-370`_.
+
+.. _PEP-370: http://www.python.org/dev/peps/pep-0370/
+
+
+Use the "--user" option
+~~~~~~~~~~~~~~~~~~~~~~~
+Python provides a User scheme for installation, which means that all
+python distributions support an alternative install location that is specific to a user [3]_.
+The default location for each OS is explained in the Python documentation
+for the ``site.USER_BASE`` variable.  This mode of installation can be turned on by
+specifying the ``--user`` option to ``setup.py install`` or ``easy_install``.
+This approach serves the need to have a user-specific stash of packages.
+
+.. [3] Prior to the User scheme, there was the Home scheme, which is still available, but requires more effort than the User scheme to get packages recognized.
+
+Use the "--user" option and customize "PYTHONUSERBASE"
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The User scheme install location can be customized by setting the ``PYTHONUSERBASE`` environment
+variable, which updates the value of ``site.USER_BASE``.  To isolate packages to a specific
+application, simply set the OS environment of that application to a specific value of
+``PYTHONUSERBASE``, that contains just those packages.
+
+Use "virtualenv"
+~~~~~~~~~~~~~~~~
+"virtualenv" is a 3rd-party python package that effectively "clones" a python installation, thereby
+creating an isolated location to install packages.  The evolution of "virtualenv" started before the existence
+of the User installation scheme.  "virtualenv" provides a version of ``easy_install`` that is
+scoped to the cloned python install and is used in the normal way. "virtualenv" does offer various features
+that the User installation scheme alone does not provide, e.g. the ability to hide the main python site-packages.
+
+Please refer to the `virtualenv`_ documentation for more details.
+
+.. _virtualenv: https://pypi.org/project/virtualenv/
+
+
+
+Package Index "API"
+-------------------
+
+Custom package indexes (and PyPI) must follow the following rules for
+EasyInstall to be able to look up and download packages:
+
+1. Except where stated otherwise, "pages" are HTML or XHTML, and "links"
+   refer to ``href`` attributes.
+
+2. Individual project version pages' URLs must be of the form
+   ``base/projectname/version``, where ``base`` is the package index's base URL.
+
+3. Omitting the ``/version`` part of a project page's URL (but keeping the
+   trailing ``/``) should result in a page that is either:
+
+   a) The single active version of that project, as though the version had been
+      explicitly included, OR
+
+   b) A page with links to all of the active version pages for that project.
+
+4. Individual project version pages should contain direct links to downloadable
+   distributions where possible.  It is explicitly permitted for a project's
+   "long_description" to include URLs, and these should be formatted as HTML
+   links by the package index, as EasyInstall does no special processing to
+   identify what parts of a page are index-specific and which are part of the
+   project's supplied description.
+
+5. Where available, MD5 information should be added to download URLs by
+   appending a fragment identifier of the form ``#md5=...``, where ``...`` is
+   the 32-character hex MD5 digest.  EasyInstall will verify that the
+   downloaded file's MD5 digest matches the given value.
+
+6. Individual project version pages should identify any "homepage" or
+   "download" URLs using ``rel="homepage"`` and ``rel="download"`` attributes
+   on the HTML elements linking to those URLs. Use of these attributes will
+   cause EasyInstall to always follow the provided links, unless it can be
+   determined by inspection that they are downloadable distributions. If the
+   links are not to downloadable distributions, they are retrieved, and if they
+   are HTML, they are scanned for download links. They are *not* scanned for
+   additional "homepage" or "download" links, as these are only processed for
+   pages that are part of a package index site.
+
+7. The root URL of the index, if retrieved with a trailing ``/``, must result
+   in a page containing links to *all* projects' active version pages.
+
+   (Note: This requirement is a workaround for the absence of case-insensitive
+   ``safe_name()`` matching of project names in URL paths. If project names are
+   matched in this fashion (e.g. via the PyPI server, mod_rewrite, or a similar
+   mechanism), then it is not necessary to include this all-packages listing
+   page.)
+
+8. If a package index is accessed via a ``file://`` URL, then EasyInstall will
+   automatically use ``index.html`` files, if present, when trying to read a
+   directory with a trailing ``/`` on the URL.
+
+
+Backward Compatibility
+~~~~~~~~~~~~~~~~~~~~~~
+
+Package indexes that wish to support setuptools versions prior to 0.6b4 should
+also follow these rules:
+
+* Homepage and download links must be preceded with ``"<th>Home Page"`` or
+  ``"<th>Download URL"``, in addition to (or instead of) the ``rel=""``
+  attributes on the actual links.  These marker strings do not need to be
+  visible, or uncommented, however!  For example, the following is a valid
+  homepage link that will work with any version of setuptools::
+
+    <li>
+     <strong>Home Page:</strong>
+     <!-- <th>Home Page -->
+     <a rel="homepage" href="http://sqlobject.org">http://sqlobject.org</a>
+    </li>
+
+  Even though the marker string is in an HTML comment, older versions of
+  EasyInstall will still "see" it and know that the link that follows is the
+  project's home page URL.
+
+* The pages described by paragraph 3(b) of the preceding section *must*
+  contain the string ``"Index of Packages</title>"`` somewhere in their text.
+  This can be inside of an HTML comment, if desired, and it can be anywhere
+  in the page.  (Note: this string MUST NOT appear on normal project pages, as
+  described in paragraphs 2 and 3(a)!)
+
+In addition, for compatibility with PyPI versions that do not use ``#md5=``
+fragment IDs, EasyInstall uses the following regular expression to match PyPI's
+displayed MD5 info (broken onto two lines for readability)::
+
+    <a href="([^"#]+)">([^<]+)</a>\n\s+\(<a href="[^?]+\?:action=show_md5
+    &amp;digest=([0-9a-f]{32})">md5</a>\)
+
+History
+=======
+
+0.6c9
+ * Fixed ``win32.exe`` support for .pth files, so unnecessary directory nesting
+   is flattened out in the resulting egg.  (There was a case-sensitivity
+   problem that affected some distributions, notably ``pywin32``.)
+
+ * Prevent ``--help-commands`` and other junk from showing under Python 2.5
+   when running ``easy_install --help``.
+
+ * Fixed GUI scripts sometimes not executing on Windows
+
+ * Fixed not picking up dependency links from recursive dependencies.
+
+ * Only make ``.py``, ``.dll`` and ``.so`` files executable when unpacking eggs
+
+ * Changes for Jython compatibility
+
+ * Improved error message when a requirement is also a directory name, but the
+   specified directory is not a source package.
+
+ * Fixed ``--allow-hosts`` option blocking ``file:`` URLs
+
+ * Fixed HTTP SVN detection failing when the page title included a project
+   name (e.g. on SourceForge-hosted SVN)
+
+ * Fix Jython script installation to handle ``#!`` lines better when
+   ``sys.executable`` is a script.
+
+ * Removed use of deprecated ``md5`` module if ``hashlib`` is available
+
+ * Keep site directories (e.g. ``site-packages``) from being included in
+   ``.pth`` files.
+
+0.6c7
+ * ``ftp:`` download URLs now work correctly.
+
+ * The default ``--index-url`` is now ``https://pypi.python.org/simple``, to use
+   the Python Package Index's new simpler (and faster!) REST API.
+
+0.6c6
+ * EasyInstall no longer aborts the installation process if a URL it wants to
+   retrieve can't be downloaded, unless the URL is an actual package download.
+   Instead, it issues a warning and tries to keep going.
+
+ * Fixed distutils-style scripts originally built on Windows having their line
+   endings doubled when installed on any platform.
+
+ * Added ``--local-snapshots-ok`` flag, to allow building eggs from projects
+   installed using ``setup.py develop``.
+
+ * Fixed not HTML-decoding URLs scraped from web pages
+
+0.6c5
+ * Fixed ``.dll`` files on Cygwin not having executable permissions when an egg
+   is installed unzipped.
+
+0.6c4
+ * Added support for HTTP "Basic" authentication using ``http://user:pass@host``
+   URLs.  If a password-protected page contains links to the same host (and
+   protocol), those links will inherit the credentials used to access the
+   original page.
+
+ * Removed all special support for Sourceforge mirrors, as Sourceforge's
+   mirror system now works well for non-browser downloads.
+
+ * Fixed not recognizing ``win32.exe`` installers that included a custom
+   bitmap.
+
+ * Fixed not allowing ``os.open()`` of paths outside the sandbox, even if they
+   are opened read-only (e.g. reading ``/dev/urandom`` for random numbers, as
+   is done by ``os.urandom()`` on some platforms).
+
+ * Fixed a problem with ``.pth`` testing on Windows when ``sys.executable``
+   has a space in it (e.g., the user installed Python to a ``Program Files``
+   directory).
+
+0.6c3
+ * You can once again use "python -m easy_install" with Python 2.4 and above.
+
+ * Python 2.5 compatibility fixes added.
+
+0.6c2
+ * Windows script wrappers now support quoted arguments and arguments
+   containing spaces.  (Patch contributed by Jim Fulton.)
+
+ * The ``ez_setup.py`` script now actually works when you put a setuptools
+   ``.egg`` alongside it for bootstrapping an offline machine.
+
+ * A writable installation directory on ``sys.path`` is no longer required to
+   download and extract a source distribution using ``--editable``.
+
+ * Generated scripts now use ``-x`` on the ``#!`` line when ``sys.executable``
+   contains non-ASCII characters, to prevent deprecation warnings about an
+   unspecified encoding when the script is run.
+
+0.6c1
+ * EasyInstall now includes setuptools version information in the
+   ``User-Agent`` string sent to websites it visits.
+
+0.6b4
+ * Fix creating Python wrappers for non-Python scripts
+
+ * Fix ``ftp://`` directory listing URLs from causing a crash when used in the
+   "Home page" or "Download URL" slots on PyPI.
+
+ * Fix ``sys.path_importer_cache`` not being updated when an existing zipfile
+   or directory is deleted/overwritten.
+
+ * Fix not recognizing HTML 404 pages from package indexes.
+
+ * Allow ``file://`` URLs to be used as a package index.  URLs that refer to
+   directories will use an internally-generated directory listing if there is
+   no ``index.html`` file in the directory.
+
+ * Allow external links in a package index to be specified using
+   ``rel="homepage"`` or ``rel="download"``, without needing the old
+   PyPI-specific visible markup.
+
+ * Suppressed warning message about possibly-misspelled project name, if an egg
+   or link for that project name has already been seen.
+
+0.6b3
+ * Fix local ``--find-links`` eggs not being copied except with
+   ``--always-copy``.
+
+ * Fix sometimes not detecting local packages installed outside of "site"
+   directories.
+
+ * Fix mysterious errors during initial ``setuptools`` install, caused by
+   ``ez_setup`` trying to run ``easy_install`` twice, due to a code fallthru
+   after deleting the egg from which it's running.
+
+0.6b2
+ * Don't install or update a ``site.py`` patch when installing to a
+   ``PYTHONPATH`` directory with ``--multi-version``, unless an
+   ``easy-install.pth`` file is already in use there.
+
+ * Construct ``.pth`` file paths in such a way that installing an egg whose
+   name begins with ``import`` doesn't cause a syntax error.
+
+ * Fixed a bogus warning message that wasn't updated since the 0.5 versions.
+
+0.6b1
+ * Better ambiguity management: accept ``#egg`` name/version even if processing
+   what appears to be a correctly-named distutils file, and ignore ``.egg``
+   files with no ``-``, since valid Python ``.egg`` files always have a version
+   number (but Scheme eggs often don't).
+
+ * Support ``file://`` links to directories in ``--find-links``, so that
+   easy_install can build packages from local source checkouts.
+
+ * Added automatic retry for Sourceforge mirrors.  The new download process is
+   to first just try dl.sourceforge.net, then randomly select mirror IPs and
+   remove ones that fail, until something works.  The removed IPs stay removed
+   for the remainder of the run.
+
+ * Ignore bdist_dumb distributions when looking at download URLs.
+
+0.6a11
+ * Process ``dependency_links.txt`` if found in a distribution, by adding the
+   URLs to the list for scanning.
+
+ * Use relative paths in ``.pth`` files when eggs are being installed to the
+   same directory as the ``.pth`` file.  This maximizes portability of the
+   target directory when building applications that contain eggs.
+
+ * Added ``easy_install-N.N`` script(s) for convenience when using multiple
+   Python versions.
+
+ * Added automatic handling of installation conflicts.  Eggs are now shifted to
+   the front of sys.path, in an order consistent with where they came from,
+   making EasyInstall seamlessly co-operate with system package managers.
+
+   The ``--delete-conflicting`` and ``--ignore-conflicts-at-my-risk`` options
+   are now no longer necessary, and will generate warnings at the end of a
+   run if you use them.
+
+ * Don't recursively traverse subdirectories given to ``--find-links``.
+
+0.6a10
+ * Added exhaustive testing of the install directory, including a spawn test
+   for ``.pth`` file support, and directory writability/existence checks.  This
+   should virtually eliminate the need to set or configure ``--site-dirs``.
+
+ * Added ``--prefix`` option for more do-what-I-mean-ishness in the absence of
+   RTFM-ing.  :)
+
+ * Enhanced ``PYTHONPATH`` support so that you don't have to put any eggs on it
+   manually to make it work.  ``--multi-version`` is no longer a silent
+   default; you must explicitly use it if installing to a non-PYTHONPATH,
+   non-"site" directory.
+
+ * Expand ``$variables`` used in the ``--site-dirs``, ``--build-directory``,
+   ``--install-dir``, and ``--script-dir`` options, whether on the command line
+   or in configuration files.
+
+ * Improved SourceForge mirror processing to work faster and be less affected
+   by transient HTML changes made by SourceForge.
+
+ * PyPI searches now use the exact spelling of requirements specified on the
+   command line or in a project's ``install_requires``.  Previously, a
+   normalized form of the name was used, which could lead to unnecessary
+   full-index searches when a project's name had an underscore (``_``) in it.
+
+ * EasyInstall can now download bare ``.py`` files and wrap them in an egg,
+   as long as you include an ``#egg=name-version`` suffix on the URL, or if
+   the ``.py`` file is listed as the "Download URL" on the project's PyPI page.
+   This allows third parties to "package" trivial Python modules just by
+   linking to them (e.g. from within their own PyPI page or download links
+   page).
+
+ * The ``--always-copy`` option now skips "system" and "development" eggs since
+   they can't be reliably copied.  Note that this may cause EasyInstall to
+   choose an older version of a package than what you expected, or it may cause
+   downloading and installation of a fresh version of what's already installed.
+
+ * The ``--find-links`` option previously scanned all supplied URLs and
+   directories as early as possible, but now only directories and direct
+   archive links are scanned immediately.  URLs are not retrieved unless a
+   package search was already going to go online due to a package not being
+   available locally, or due to the use of the ``--update`` or ``-U`` option.
+
+ * Fixed the annoying ``--help-commands`` wart.
+
+0.6a9
+ * Fixed ``.pth`` file processing picking up nested eggs (i.e. ones inside
+   "baskets") when they weren't explicitly listed in the ``.pth`` file.
+
+ * If more than one URL appears to describe the exact same distribution, prefer
+   the shortest one.  This helps to avoid "table of contents" CGI URLs like the
+   ones on effbot.org.
+
+ * Quote arguments to python.exe (including python's path) to avoid problems
+   when Python (or a script) is installed in a directory whose name contains
+   spaces on Windows.
+
+ * Support full roundtrip translation of eggs to and from ``bdist_wininst``
+   format.  Running ``bdist_wininst`` on a setuptools-based package wraps the
+   egg in an .exe that will safely install it as an egg (i.e., with metadata
+   and entry-point wrapper scripts), and ``easy_install`` can turn the .exe
+   back into an ``.egg`` file or directory and install it as such.
+
+0.6a8
+ * Update for changed SourceForge mirror format
+
+ * Fixed not installing dependencies for some packages fetched via Subversion
+
+ * Fixed dependency installation with ``--always-copy`` not using the same
+   dependency resolution procedure as other operations.
+
+ * Fixed not fully removing temporary directories on Windows, if a Subversion
+   checkout left read-only files behind
+
+ * Fixed some problems building extensions when Pyrex was installed, especially
+   with Python 2.4 and/or packages using SWIG.
+
+0.6a7
+ * Fixed not being able to install Windows script wrappers using Python 2.3
+
+0.6a6
+ * Added support for "traditional" PYTHONPATH-based non-root installation, and
+   also the convenient ``virtual-python.py`` script, based on a contribution
+   by Ian Bicking.  The setuptools egg now contains a hacked ``site`` module
+   that makes the PYTHONPATH-based approach work with .pth files, so that you
+   can get the full EasyInstall feature set on such installations.
+
+ * Added ``--no-deps`` and ``--allow-hosts`` options.
+
+ * Improved Windows ``.exe`` script wrappers so that the script can have the
+   same name as a module without confusing Python.
+
+ * Changed dependency processing so that it's breadth-first, allowing a
+   depender's preferences to override those of a dependee, to prevent conflicts
+   when a lower version is acceptable to the dependee, but not the depender.
+   Also, ensure that currently installed/selected packages aren't given
+   precedence over ones desired by a package being installed, which could
+   cause conflict errors.
+
+0.6a3
+ * Improved error message when trying to use old ways of running
+   ``easy_install``.  Removed the ability to run via ``python -m`` or by
+   running ``easy_install.py``; ``easy_install`` is the command to run on all
+   supported platforms.
+
+ * Improved wrapper script generation and runtime initialization so that a
+   VersionConflict doesn't occur if you later install a competing version of a
+   needed package as the default version of that package.
+
+ * Fixed a problem parsing version numbers in ``#egg=`` links.
+
+0.6a2
+ * EasyInstall can now install "console_scripts" defined by packages that use
+   ``setuptools`` and define appropriate entry points.  On Windows, console
+   scripts get an ``.exe`` wrapper so you can just type their name.  On other
+   platforms, the scripts are installed without a file extension.
+
+ * Using ``python -m easy_install`` or running ``easy_install.py`` is now
+   DEPRECATED, since an ``easy_install`` wrapper is now available on all
+   platforms.
+
+0.6a1
+ * EasyInstall now does MD5 validation of downloads from PyPI, or from any link
+   that has an "#md5=..." trailer with a 32-digit lowercase hex md5 digest.
+
+ * EasyInstall now handles symlinks in target directories by removing the link,
+   rather than attempting to overwrite the link's destination.  This makes it
+   easier to set up an alternate Python "home" directory (as described above in
+   the `Non-Root Installation`_ section).
+
+ * Added support for handling MacOS platform information in ``.egg`` filenames,
+   based on a contribution by Kevin Dangoor.  You may wish to delete and
+   reinstall any eggs whose filename includes "darwin" and "Power_Macintosh",
+   because the format for this platform information has changed so that minor
+   OS X upgrades (such as 10.4.1 to 10.4.2) do not cause eggs built with a
+   previous OS version to become obsolete.
+
+ * easy_install's dependency processing algorithms have changed.  When using
+   ``--always-copy``, it now ensures that dependencies are copied too.  When
+   not using ``--always-copy``, it tries to use a single resolution loop,
+   rather than recursing.
+
+ * Fixed installing extra ``.pyc`` or ``.pyo`` files for scripts with ``.py``
+   extensions.
+
+ * Added ``--site-dirs`` option to allow adding custom "site" directories.
+   Made ``easy-install.pth`` work in platform-specific alternate site
+   directories (e.g. ``~/Library/Python/2.x/site-packages`` on Mac OS X).
+
+ * If you manually delete the current version of a package, the next run of
+   EasyInstall against the target directory will now remove the stray entry
+   from the ``easy-install.pth`` file.
+
+ * EasyInstall now recognizes URLs with a ``#egg=project_name`` fragment ID
+   as pointing to the named project's source checkout.  Such URLs have a lower
+   match precedence than any other kind of distribution, so they'll only be
+   used if they have a higher version number than any other available
+   distribution, or if you use the ``--editable`` option.  The ``#egg``
+   fragment can contain a version if it's formatted as ``#egg=proj-ver``,
+   where ``proj`` is the project name, and ``ver`` is the version number.  You
+   *must* use the format for these values that the ``bdist_egg`` command uses;
+   i.e., all non-alphanumeric runs must be condensed to single underscore
+   characters.
+
+ * Added the ``--editable`` option; see `Editing and Viewing Source Packages`_
+   above for more info.  Also, slightly changed the behavior of the
+   ``--build-directory`` option.
+
+ * Fixed the setup script sandbox facility not recognizing certain paths as
+   valid on case-insensitive platforms.
+
+0.5a12
+ * Fix ``python -m easy_install`` not working due to setuptools being installed
+   as a zipfile.  Update safety scanner to check for modules that might be used
+   as ``python -m`` scripts.
+
+ * Misc. fixes for win32.exe support, including changes to support Python 2.4's
+   changed ``bdist_wininst`` format.
+
+0.5a10
+ * Put the ``easy_install`` module back in as a module, as it's needed for
+   ``python -m`` to run it!
+
+ * Allow ``--find-links/-f`` to accept local directories or filenames as well
+   as URLs.
+
+0.5a9
+ * EasyInstall now automatically detects when an "unmanaged" package or
+   module is going to be on ``sys.path`` ahead of a package you're installing,
+   thereby preventing the newer version from being imported.  By default, it
+   will abort installation to alert you of the problem, but there are also
+   new options (``--delete-conflicting`` and ``--ignore-conflicts-at-my-risk``)
+   available to change the default behavior.  (Note: this new feature doesn't
+   take effect for egg files that were built with older ``setuptools``
+   versions, because they lack the new metadata file required to implement it.)
+
+ * The ``easy_install`` distutils command now uses ``DistutilsError`` as its
+   base error type for errors that should just issue a message to stderr and
+   exit the program without a traceback.
+
+ * EasyInstall can now be given a path to a directory containing a setup
+   script, and it will attempt to build and install the package there.
+
+ * EasyInstall now performs a safety analysis on module contents to determine
+   whether a package is likely to run in zipped form, and displays
+   information about what modules may be doing introspection that would break
+   when running as a zipfile.
+
+ * Added the ``--always-unzip/-Z`` option, to force unzipping of packages that
+   would ordinarily be considered safe to unzip, and changed the meaning of
+   ``--zip-ok/-z`` to "always leave everything zipped".
+
+0.5a8
+ * There is now a separate documentation page for `setuptools`_; revision
+   history that's not specific to EasyInstall has been moved to that page.
+
+ .. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
+
+0.5a5
+ * Made ``easy_install`` a standard ``setuptools`` command, moving it from
+   the ``easy_install`` module to ``setuptools.command.easy_install``.  Note
+   that if you were importing or extending it, you must now change your imports
+   accordingly.  ``easy_install.py`` is still installed as a script, but not as
+   a module.
+
+0.5a4
+ * Added ``--always-copy/-a`` option to always copy needed packages to the
+   installation directory, even if they're already present elsewhere on
+   sys.path. (In previous versions, this was the default behavior, but now
+   you must request it.)
+
+ * Added ``--upgrade/-U`` option to force checking PyPI for latest available
+   version(s) of all packages requested by name and version, even if a matching
+   version is available locally.
+
+ * Added automatic installation of dependencies declared by a distribution
+   being installed.  These dependencies must be listed in the distribution's
+   ``EGG-INFO`` directory, so the distribution has to have declared its
+   dependencies by using setuptools.  If a package has requirements it didn't
+   declare, you'll still have to deal with them yourself.  (E.g., by asking
+   EasyInstall to find and install them.)
+
+ * Added the ``--record`` option to ``easy_install`` for the benefit of tools
+   that run ``setup.py install --record=filename`` on behalf of another
+   packaging system.
+
+0.5a3
+ * Fixed not setting script permissions to allow execution.
+
+ * Improved sandboxing so that setup scripts that want a temporary directory
+   (e.g. pychecker) can still run in the sandbox.
+
+0.5a2
+ * Fix stupid stupid refactoring-at-the-last-minute typos.  :(
+
+0.5a1
+ * Added support for converting ``.win32.exe`` installers to eggs on the fly.
+   EasyInstall will now recognize such files by name and install them.
+
+ * Fixed a problem with picking the "best" version to install (versions were
+   being sorted as strings, rather than as parsed values)
+
+0.4a4
+ * Added support for the distutils "verbose/quiet" and "dry-run" options, as
+   well as the "optimize" flag.
+
+ * Support downloading packages that were uploaded to PyPI (by scanning all
+   links on package pages, not just the homepage/download links).
+
+0.4a3
+ * Add progress messages to the search/download process so that you can tell
+   what URLs it's reading to find download links.  (Hopefully, this will help
+   people report out-of-date and broken links to package authors, and to tell
+   when they've asked for a package that doesn't exist.)
+
+0.4a2
+ * Added support for installing scripts
+
+ * Added support for setting options via distutils configuration files, and
+   using distutils' default options as a basis for EasyInstall's defaults.
+
+ * Renamed ``--scan-url/-s`` to ``--find-links/-f`` to free up ``-s`` for the
+   script installation directory option.
+
+ * Use ``urllib2`` instead of ``urllib``, to allow use of ``https:`` URLs if
+   Python includes SSL support.
+
+0.4a1
+ * Added ``--scan-url`` and ``--index-url`` options, to scan download pages
+   and search PyPI for needed packages.
+
+0.3a4
+ * Restrict ``--build-directory=DIR/-b DIR`` option to only be used with single
+   URL installs, to avoid running the wrong setup.py.
+
+0.3a3
+ * Added ``--build-directory=DIR/-b DIR`` option.
+
+ * Added "installation report" that explains how to use 'require()' when doing
+   a multiversion install or alternate installation directory.
+
+ * Added SourceForge mirror auto-select (Contributed by Ian Bicking)
+
+ * Added "sandboxing" that stops a setup script from running if it attempts to
+   write to the filesystem outside of the build area
+
+ * Added more workarounds for packages with quirky ``install_data`` hacks
+
+0.3a2
+ * Added subversion download support for ``svn:`` and ``svn+`` URLs, as well as
+   automatic recognition of HTTP subversion URLs (Contributed by Ian Bicking)
+
+ * Misc. bug fixes
+
+0.3a1
+ * Initial release.
+
+
+Future Plans
+============
+
+* Additional utilities to list/remove/verify packages
+* Signature checking?  SSL?  Ability to suppress PyPI search?
+* Display byte progress meter when downloading distributions and long pages?
+* Redirect stdout/stderr to log during run_setup?
diff --git a/docs/formats.txt b/docs/formats.txt
new file mode 100644
index 0000000..a182eb9
--- /dev/null
+++ b/docs/formats.txt
@@ -0,0 +1,682 @@
+=====================================
+The Internal Structure of Python Eggs
+=====================================
+
+STOP! This is not the first document you should read!
+
+
+
+.. contents:: **Table of Contents**
+
+
+----------------------
+Eggs and their Formats
+----------------------
+
+A "Python egg" is a logical structure embodying the release of a
+specific version of a Python project, comprising its code, resources,
+and metadata. There are multiple formats that can be used to physically
+encode a Python egg, and others can be developed. However, a key
+principle of Python eggs is that they should be discoverable and
+importable. That is, it should be possible for a Python application to
+easily and efficiently find out what eggs are present on a system, and
+to ensure that the desired eggs' contents are importable.
+
+There are two basic formats currently implemented for Python eggs:
+
+1. ``.egg`` format: a directory or zipfile *containing* the project's
+   code and resources, along with an ``EGG-INFO`` subdirectory that
+   contains the project's metadata
+
+2. ``.egg-info`` format: a file or directory placed *adjacent* to the
+   project's code and resources, that directly contains the project's
+   metadata.
+
+Both formats can include arbitrary Python code and resources, including
+static data files, package and non-package directories, Python
+modules, C extension modules, and so on.  But each format is optimized
+for different purposes.
+
+The ``.egg`` format is well-suited to distribution and the easy
+uninstallation or upgrades of code, since the project is essentially
+self-contained within a single directory or file, unmingled with any
+other projects' code or resources.  It also makes it possible to have
+multiple versions of a project simultaneously installed, such that
+individual programs can select the versions they wish to use.
+
+The ``.egg-info`` format, on the other hand, was created to support
+backward-compatibility, performance, and ease of installation for system
+packaging tools that expect to install all projects' code and resources
+to a single directory (e.g. ``site-packages``).  Placing the metadata
+in that same directory simplifies the installation process, since it
+isn't necessary to create ``.pth`` files or otherwise modify
+``sys.path`` to include each installed egg.
+
+Its disadvantage, however, is that it provides no support for clean
+uninstallation or upgrades, and of course only a single version of a
+project can be installed to a given directory. Thus, support from a
+package management tool is required. (This is why setuptools' "install"
+command refers to this type of egg installation as "single-version,
+externally managed".)  Also, they lack sufficient data to allow them to
+be copied from their installation source.  easy_install can "ship" an
+application by copying ``.egg`` files or directories to a target
+location, but it cannot do this for ``.egg-info`` installs, because
+there is no way to tell what code and resources belong to a particular
+egg -- there may be several eggs "scrambled" together in a single
+installation location, and the ``.egg-info`` format does not currently
+include a way to list the files that were installed.  (This may change
+in a future version.)
+
+
+Code and Resources
+==================
+
+The layout of the code and resources is dictated by Python's normal
+import layout, relative to the egg's "base location".
+
+For the ``.egg`` format, the base location is the ``.egg`` itself. That
+is, adding the ``.egg`` filename or directory name to ``sys.path``
+makes its contents importable.
+
+For the ``.egg-info`` format, however, the base location is the
+directory that *contains* the ``.egg-info``, and thus it is the
+directory that must be added to ``sys.path`` to make the egg importable.
+(Note that this means that the "normal" installation of a package to a
+``sys.path`` directory is sufficient to make it an "egg" if it has an
+``.egg-info`` file or directory installed alongside of it.)
+
+
+Project Metadata
+=================
+
+If eggs contained only code and resources, there would of course be
+no difference between them and any other directory or zip file on
+``sys.path``.  Thus, metadata must also be included, using a metadata
+file or directory.
+
+For the ``.egg`` format, the metadata is placed in an ``EGG-INFO``
+subdirectory, directly within the ``.egg`` file or directory.  For the
+``.egg-info`` format, metadata is stored directly within the
+``.egg-info`` directory itself.
+
+The minimum project metadata that all eggs must have is a standard
+Python ``PKG-INFO`` file, named ``PKG-INFO`` and placed within the
+metadata directory appropriate to the format.  Because it's possible for
+this to be the only metadata file included, ``.egg-info`` format eggs
+are not required to be a directory; they can just be a ``.egg-info``
+file that directly contains the ``PKG-INFO`` metadata.  This eliminates
+the need to create a directory just to store one file.  This option is
+*not* available for ``.egg`` formats, since setuptools always includes
+other metadata.  (In fact, setuptools itself never generates
+``.egg-info`` files, either; the support for using files was added so
+that the requirement could easily be satisfied by other tools, such
+as distutils).
+
+In addition to the ``PKG-INFO`` file, an egg's metadata directory may
+also include files and directories representing various forms of
+optional standard metadata (see the section on `Standard Metadata`_,
+below) or user-defined metadata required by the project.  For example,
+some projects may define a metadata format to describe their application
+plugins, and metadata in this format would then be included by plugin
+creators in their projects' metadata directories.
+
+
+Filename-Embedded Metadata
+==========================
+
+To allow introspection of installed projects and runtime resolution of
+inter-project dependencies, a certain amount of information is embedded
+in egg filenames.  At a minimum, this includes the project name, and
+ideally will also include the project version number.  Optionally, it
+can also include the target Python version and required runtime
+platform if platform-specific C code is included.  The syntax of an
+egg filename is as follows::
+
+    name ["-" version ["-py" pyver ["-" required_platform]]] "." ext
+
+The "name" and "version" should be escaped using the ``to_filename()``
+function provided by ``pkg_resources``, after first processing them with
+``safe_name()`` and ``safe_version()`` respectively.  These latter two
+functions can also be used to later "unescape" these parts of the
+filename.  (For a detailed description of these transformations, please
+see the "Parsing Utilities" section of the ``pkg_resources`` manual.)
+
+The "pyver" string is the Python major and minor version (e.g. ``2.7``),
+as found in the first 3 characters of ``sys.version``.  "required_platform" is essentially
+a distutils ``get_platform()`` string, but with enhancements to properly
+distinguish Mac OS versions.  (See the ``get_build_platform()``
+documentation in the "Platform Utilities" section of the
+``pkg_resources`` manual for more details.)
+
+Finally, the "ext" is either ``.egg`` or ``.egg-info``, as appropriate
+for the egg's format.
+
+Normally, an egg's filename should include at least the project name and
+version, as this allows the runtime system to find desired project
+versions without having to read the egg's PKG-INFO to determine its
+version number.
+
+Setuptools, however, only includes the version number in the filename
+when an ``.egg`` file is built using the ``bdist_egg`` command, or when
+an ``.egg-info`` directory is being installed by the
+``install_egg_info`` command. When generating metadata for use with the
+original source tree, it only includes the project name, so that the
+directory will not have to be renamed each time the project's version
+changes.
+
+This is especially important when version numbers change frequently, and
+the source metadata directory is kept under version control with the
+rest of the project.  (As would be the case when the project's source
+includes project-defined metadata that is not generated by
+setuptools from data in the setup script.)
+
+
+Egg Links
+=========
+
+In addition to the ``.egg`` and ``.egg-info`` formats, there is a third
+egg-related extension that you may encounter on occasion: ``.egg-link``
+files.
+
+These files are not eggs, strictly speaking. They simply provide a way
+to reference an egg that is not physically installed in the desired
+location. They exist primarily as a cross-platform alternative to
+symbolic links, to support "installing" code that is being developed in
+a different location than the desired installation location. For
+example, if a user is developing an application plugin in their home
+directory, but the plugin needs to be "installed" in an application
+plugin directory, running "setup.py develop -md /path/to/app/plugins"
+will install an ``.egg-link`` file in ``/path/to/app/plugins``, that
+tells the egg runtime system where to find the actual egg (the user's
+project source directory and its ``.egg-info`` subdirectory).
+
+``.egg-link`` files are named following the format for ``.egg`` and
+``.egg-info`` names, but only the project name is included; no version,
+Python version, or platform information is included.  When the runtime
+searches for available eggs, ``.egg-link`` files are opened and the
+actual egg file/directory name is read from them.
+
+Each ``.egg-link`` file should contain a single file or directory name,
+with no newlines.  This filename should be the base location of one or
+more eggs.  That is, the name must either end in ``.egg``, or else it
+should be the parent directory of one or more ``.egg-info`` format eggs.
+
+As of setuptools 0.6c6, the path may be specified as a platform-independent
+(i.e. ``/``-separated) relative path from the directory containing the
+``.egg-link`` file, and a second line may appear in the file, specifying a
+platform-independent relative path from the egg's base directory to its
+setup script directory.  This allows installation tools such as EasyInstall
+to find the project's setup directory and build eggs or perform other setup
+commands on it.
+
+
+-----------------
+Standard Metadata
+-----------------
+
+In addition to the minimum required ``PKG-INFO`` metadata, projects can
+include a variety of standard metadata files or directories, as
+described below.  Except as otherwise noted, these files and directories
+are automatically generated by setuptools, based on information supplied
+in the setup script or through analysis of the project's code and
+resources.
+
+Most of these files and directories are generated via "egg-info
+writers" during execution of the setuptools ``egg_info`` command, and
+are listed in the ``egg_info.writers`` entry point group defined by
+setuptools' own ``setup.py`` file.
+
+Project authors can register their own metadata writers as entry points
+in this group (as described in the setuptools manual under "Adding new
+EGG-INFO Files") to cause setuptools to generate project-specific
+metadata files or directories during execution of the ``egg_info``
+command.  It is up to project authors to document these new metadata
+formats, if they create any.
+
+
+``.txt`` File Formats
+=====================
+
+Files described in this section that have ``.txt`` extensions have a
+simple lexical format consisting of a sequence of text lines, each line
+terminated by a linefeed character (regardless of platform).  Leading
+and trailing whitespace on each line is ignored, as are blank lines and
+lines whose first nonblank character is a ``#`` (comment symbol).  (This
+is the parsing format defined by the ``yield_lines()`` function of
+the ``pkg_resources`` module.)
+
+All ``.txt`` files defined by this section follow this format, but some
+are also "sectioned" files, meaning that their contents are divided into
+sections, using square-bracketed section headers akin to Windows
+``.ini`` format.  Note that this does *not* imply that the lines within
+the sections follow an ``.ini`` format, however.  Please see an
+individual metadata file's documentation for a description of what the
+lines and section names mean in that particular file.
+
+Sectioned files can be parsed using the ``split_sections()`` function;
+see the "Parsing Utilities" section of the ``pkg_resources`` manual
+for details.
+
+
+Dependency Metadata
+===================
+
+
+``requires.txt``
+----------------
+
+This is a "sectioned" text file.  Each section is a sequence of
+"requirements", as parsed by the ``parse_requirements()`` function;
+please see the ``pkg_resources`` manual for the complete requirement
+parsing syntax.
+
+The first, unnamed section (i.e., before the first section header) in
+this file is the project's core requirements, which must be installed
+for the project to function.  (Specified using the ``install_requires``
+keyword to ``setup()``).
+
+The remaining (named) sections describe the project's "extra"
+requirements, as specified using the ``extras_require`` keyword to
+``setup()``.  The section name is the name of the optional feature, and
+the section body lists that feature's dependencies.
+
+Note that it is not normally necessary to inspect this file directly;
+``pkg_resources.Distribution`` objects have a ``requires()`` method
+that can be used to obtain ``Requirement`` objects describing the
+project's core and optional dependencies.
+
+
+``setup_requires.txt``
+----------------------
+
+Much like ``requires.txt`` except represents the requirements
+specified by the ``setup_requires`` parameter to the Distribution.
+
+
+``dependency_links.txt``
+------------------------
+
+A list of dependency URLs, one per line, as specified using the
+``dependency_links`` keyword to ``setup()``.  These may be direct
+download URLs, or the URLs of web pages containing direct download
+links, and will be used by EasyInstall to find dependencies, as though
+the user had manually provided them via the ``--find-links`` command
+line option.  Please see the setuptools manual and EasyInstall manual
+for more information on specifying this option, and for information on
+how EasyInstall processes ``--find-links`` URLs.
+
+
+``depends.txt`` -- Obsolete, do not create!
+-------------------------------------------
+
+This file follows an identical format to ``requires.txt``, but is
+obsolete and should not be used.  The earliest versions of setuptools
+required users to manually create and maintain this file, so the runtime
+still supports reading it, if it exists.  The new filename was created
+so that it could be automatically generated from ``setup()`` information
+without overwriting an existing hand-created ``depends.txt``, if one
+was already present in the project's source ``.egg-info`` directory.
+
+
+``namespace_packages.txt`` -- Namespace Package Metadata
+========================================================
+
+A list of namespace package names, one per line, as supplied to the
+``namespace_packages`` keyword to ``setup()``.  Please see the manuals
+for setuptools and ``pkg_resources`` for more information about
+namespace packages.
+
+
+``entry_points.txt`` -- "Entry Point"/Plugin Metadata
+=====================================================
+
+This is a "sectioned" text file, whose contents encode the
+``entry_points`` keyword supplied to ``setup()``.  All sections are
+named, as the section names specify the entry point groups in which the
+corresponding section's entry points are registered.
+
+Each section is a sequence of "entry point" lines, each parseable using
+the ``EntryPoint.parse`` classmethod; please see the ``pkg_resources``
+manual for the complete entry point parsing syntax.
+
+Note that it is not necessary to parse this file directly; the
+``pkg_resources`` module provides a variety of APIs to locate and load
+entry points automatically.  Please see the setuptools and
+``pkg_resources`` manuals for details on the nature and uses of entry
+points.
+
+
+The ``scripts`` Subdirectory
+============================
+
+This directory is currently only created for ``.egg`` files built by
+the setuptools ``bdist_egg`` command.  It will contain copies of all
+of the project's "traditional" scripts (i.e., those specified using the
+``scripts`` keyword to ``setup()``).  This is so that they can be
+reconstituted when an ``.egg`` file is installed.
+
+The scripts are placed here using the distutils' standard
+``install_scripts`` command, so any ``#!`` lines reflect the Python
+installation where the egg was built.  But instead of copying the
+scripts to the local script installation directory, EasyInstall writes
+short wrapper scripts that invoke the original scripts from inside the
+egg, after ensuring that sys.path includes the egg and any eggs it
+depends on.  For more about `script wrappers`_, see the section below on
+`Installation and Path Management Issues`_.
+
+
+Zip Support Metadata
+====================
+
+
+``native_libs.txt``
+-------------------
+
+A list of C extensions and other dynamic link libraries contained in
+the egg, one per line.  Paths are ``/``-separated and relative to the
+egg's base location.
+
+This file is generated as part of ``bdist_egg`` processing, and as such
+only appears in ``.egg`` files (and ``.egg`` directories created by
+unpacking them).  It is used to ensure that all libraries are extracted
+from a zipped egg at the same time, in case there is any direct linkage
+between them.  Please see the `Zip File Issues`_ section below for more
+information on library and resource extraction from ``.egg`` files.
+
+
+``eager_resources.txt``
+-----------------------
+
+A list of resource files and/or directories, one per line, as specified
+via the ``eager_resources`` keyword to ``setup()``.  Paths are
+``/``-separated and relative to the egg's base location.
+
+Resource files or directories listed here will be extracted
+simultaneously, if any of the named resources are extracted, or if any
+native libraries listed in ``native_libs.txt`` are extracted.  Please
+see the setuptools manual for details on what this feature is used for
+and how it works, as well as the `Zip File Issues`_ section below.
+
+
+``zip-safe`` and ``not-zip-safe``
+---------------------------------
+
+These are zero-length files, and either one or the other should exist.
+If ``zip-safe`` exists, it means that the project will work properly
+when installed as an ``.egg`` zipfile, and conversely the existence of
+``not-zip-safe`` means the project should not be installed as an
+``.egg`` file.  The ``zip_safe`` option to setuptools' ``setup()``
+determines which file will be written. If the option isn't provided,
+setuptools attempts to make its own assessment of whether the package
+can work, based on code and content analysis.
+
+If neither file is present at installation time, EasyInstall defaults
+to assuming that the project should be unzipped.  (Command-line options
+to EasyInstall, however, take precedence even over an existing
+``zip-safe`` or ``not-zip-safe`` file.)
+
+Note that these flag files appear only in ``.egg`` files generated by
+``bdist_egg``, and in ``.egg`` directories created by unpacking such an
+``.egg`` file.
+
+
+
+``top_level.txt`` -- Conflict Management Metadata
+=================================================
+
+This file is a list of the top-level module or package names provided
+by the project, one Python identifier per line.
+
+Subpackages are not included; a project containing both a ``foo.bar``
+and a ``foo.baz`` would include only one line, ``foo``, in its
+``top_level.txt``.
+
+This data is used by ``pkg_resources`` at runtime to issue a warning if
+an egg is added to ``sys.path`` when its contained packages may have
+already been imported.
+
+(It was also once used to detect conflicts with non-egg packages at
+installation time, but in more recent versions, setuptools installs eggs
+in such a way that they always override non-egg packages, thus
+preventing a problem from arising.)
+
+
+``SOURCES.txt`` -- Source Files Manifest
+========================================
+
+This file is roughly equivalent to the distutils' ``MANIFEST`` file.
+The differences are as follows:
+
+* The filenames always use ``/`` as a path separator, which must be
+  converted back to a platform-specific path whenever they are read.
+
+* The file is automatically generated by setuptools whenever the
+  ``egg_info`` or ``sdist`` commands are run, and it is *not*
+  user-editable.
+
+Although this metadata is included with distributed eggs, it is not
+actually used at runtime for any purpose.  Its function is to ensure
+that setuptools-built *source* distributions can correctly discover
+what files are part of the project's source, even if the list had been
+generated using revision control metadata on the original author's
+system.
+
+In other words, ``SOURCES.txt`` has little or no runtime value for being
+included in distributed eggs, and it is possible that future versions of
+the ``bdist_egg`` and ``install_egg_info`` commands will strip it before
+installation or distribution.  Therefore, do not rely on its being
+available outside of an original source directory or source
+distribution.
+
+
+------------------------------
+Other Technical Considerations
+------------------------------
+
+
+Zip File Issues
+===============
+
+Although zip files resemble directories, they are not fully
+substitutable for them.  Most platforms do not support loading dynamic
+link libraries contained in zipfiles, so it is not possible to directly
+import C extensions from ``.egg`` zipfiles.  Similarly, there are many
+existing libraries -- whether in Python or C -- that require actual
+operating system filenames, and do not work with arbitrary "file-like"
+objects or in-memory strings, and thus cannot operate directly on the
+contents of zip files.
+
+To address these issues, the ``pkg_resources`` module provides a
+"resource API" to support obtaining either the contents of a resource,
+or a true operating system filename for the resource.  If the egg
+containing the resource is a directory, the resource's real filename
+is simply returned.  However, if the egg is a zipfile, then the
+resource is first extracted to a cache directory, and the filename
+within the cache is returned.
+
+The cache directory is determined by the ``pkg_resources`` API; please
+see the ``set_cache_path()`` and ``get_default_cache()`` documentation
+for details.
+
+
+The Extraction Process
+----------------------
+
+Resources are extracted to a cache subdirectory whose name is based
+on the enclosing ``.egg`` filename and the path to the resource.  If
+there is already a file of the correct name, size, and timestamp, its
+filename is returned to the requester.  Otherwise, the desired file is
+extracted first to a temporary name generated using
+``mkstemp(".$extract",target_dir)``, and then its timestamp is set to
+match the one in the zip file, before renaming it to its final name.
+(Some collision detection and resolution code is used to handle the
+fact that Windows doesn't overwrite files when renaming.)
+
+If a resource directory is requested, all of its contents are
+recursively extracted in this fashion, to ensure that the directory
+name can be used as if it were valid all along.
+
+If the resource requested for extraction is listed in the
+``native_libs.txt`` or ``eager_resources.txt`` metadata files, then
+*all* resources listed in *either* file will be extracted before the
+requested resource's filename is returned, thus ensuring that all
+C extensions and data used by them will be simultaneously available.
+
+
+Extension Import Wrappers
+-------------------------
+
+Since Python's built-in zip import feature does not support loading
+C extension modules from zipfiles, the setuptools ``bdist_egg`` command
+generates special import wrappers to make it work.
+
+The wrappers are ``.py`` files (along with corresponding ``.pyc``
+and/or ``.pyo`` files) that have the same module name as the
+corresponding C extension.  These wrappers are located in the same
+package directory (or top-level directory) within the zipfile, so that
+say, ``foomodule.so`` will get a corresponding ``foo.py``, while
+``bar/baz.pyd`` will get a corresponding ``bar/baz.py``.
+
+These wrapper files contain a short stanza of Python code that asks
+``pkg_resources`` for the filename of the corresponding C extension,
+then reloads the module using the obtained filename.  This will cause
+``pkg_resources`` to first ensure that all of the egg's C extensions
+(and any accompanying "eager resources") are extracted to the cache
+before attempting to link to the C library.
+
+Note, by the way, that ``.egg`` directories will also contain these
+wrapper files.  However, Python's default import priority is such that
+C extensions take precedence over same-named Python modules, so the
+import wrappers are ignored unless the egg is a zipfile.
+
+
+Installation and Path Management Issues
+=======================================
+
+Python's initial setup of ``sys.path`` is very dependent on the Python
+version and installation platform, as well as how Python was started
+(i.e., script vs. ``-c`` vs. ``-m`` vs. interactive interpreter).
+In fact, Python also provides only two relatively robust ways to affect
+``sys.path`` outside of direct manipulation in code: the ``PYTHONPATH``
+environment variable, and ``.pth`` files.
+
+However, with no cross-platform way to safely and persistently change
+environment variables, this leaves ``.pth`` files as EasyInstall's only
+real option for persistent configuration of ``sys.path``.
+
+But ``.pth`` files are rather strictly limited in what they are allowed
+to do normally.  They add directories only to the *end* of ``sys.path``,
+after any locally-installed ``site-packages`` directory, and they are
+only processed *in* the ``site-packages`` directory to start with.
+
+This is a double whammy for users who lack write access to that
+directory, because they can't create a ``.pth`` file that Python will
+read, and even if a sympathetic system administrator adds one for them
+that calls ``site.addsitedir()`` to allow some other directory to
+contain ``.pth`` files, they won't be able to install newer versions of
+anything that's installed in the systemwide ``site-packages``, because
+their paths will still be added *after* ``site-packages``.
+
+So EasyInstall applies two workarounds to solve these problems.
+
+The first is that EasyInstall leverages ``.pth`` files' "import" feature
+to manipulate ``sys.path`` and ensure that anything EasyInstall adds
+to a ``.pth`` file will always appear before both the standard library
+and the local ``site-packages`` directories.  Thus, it is always
+possible for a user who can write a Python-read ``.pth`` file to ensure
+that their packages come first in their own environment.
+
+Second, when installing to a ``PYTHONPATH`` directory (as opposed to
+a "site" directory like ``site-packages``) EasyInstall will also install
+a special version of the ``site`` module.  Because it's in a
+``PYTHONPATH`` directory, this module will get control before the
+standard library version of ``site`` does.  It will record the state of
+``sys.path`` before invoking the "real" ``site`` module, and then
+afterwards it processes any ``.pth`` files found in ``PYTHONPATH``
+directories, including all the fixups needed to ensure that eggs always
+appear before the standard library in sys.path, but are in a relative
+order to one another that is defined by their ``PYTHONPATH`` and
+``.pth``-prescribed sequence.
+
+The net result of these changes is that ``sys.path`` order will be
+as follows at runtime:
+
+1. The ``sys.argv[0]`` directory, or an empty string if no script
+   is being executed.
+
+2. All eggs installed by EasyInstall in any ``.pth`` file in each
+   ``PYTHONPATH`` directory, in order first by ``PYTHONPATH`` order,
+   then normal ``.pth`` processing order (which is to say alphabetical
+   by ``.pth`` filename, then by the order of listing within each
+   ``.pth`` file).
+
+3. All eggs installed by EasyInstall in any ``.pth`` file in each "site"
+   directory (such as ``site-packages``), following the same ordering
+   rules as for the ones on ``PYTHONPATH``.
+
+4. The ``PYTHONPATH`` directories themselves, in their original order
+
+5. Any paths from ``.pth`` files found on ``PYTHONPATH`` that were *not*
+   eggs installed by EasyInstall, again following the same relative
+   ordering rules.
+
+6. The standard library and "site" directories, along with the contents
+   of any ``.pth`` files found in the "site" directories.
+
+Notice that sections 1, 4, and 6 comprise the "normal" Python setup for
+``sys.path``.  Sections 2 and 3 are inserted to support eggs, and
+section 5 emulates what the "normal" semantics of ``.pth`` files on
+``PYTHONPATH`` would be if Python natively supported them.
+
+For further discussion of the tradeoffs that went into this design, as
+well as notes on the actual magic inserted into ``.pth`` files to make
+them do these things, please see also the following messages to the
+distutils-SIG mailing list:
+
+* http://mail.python.org/pipermail/distutils-sig/2006-February/006026.html
+* http://mail.python.org/pipermail/distutils-sig/2006-March/006123.html
+
+
+Script Wrappers
+---------------
+
+EasyInstall never directly installs a project's original scripts to
+a script installation directory.  Instead, it writes short wrapper
+scripts that first ensure that the project's dependencies are active
+on sys.path, before invoking the original script.  These wrappers
+have a #! line that points to the version of Python that was used to
+install them, and their second line is always a comment that indicates
+the type of script wrapper, the project version required for the script
+to run, and information identifying the script to be invoked.
+
+The format of this marker line is::
+
+    "# EASY-INSTALL-" script_type ": " tuple_of_strings "\n"
+
+The ``script_type`` is one of ``SCRIPT``, ``DEV-SCRIPT``, or
+``ENTRY-SCRIPT``.  The ``tuple_of_strings`` is a comma-separated
+sequence of Python string constants.  For ``SCRIPT`` and ``DEV-SCRIPT``
+wrappers, there are two strings: the project version requirement, and
+the script name (as a filename within the ``scripts`` metadata
+directory).  For ``ENTRY-SCRIPT`` wrappers, there are three:
+the project version requirement, the entry point group name, and the
+entry point name.  (See the "Automatic Script Creation" section in the
+setuptools manual for more information about entry point scripts.)
+
+In each case, the project version requirement string will be a string
+parseable with the ``pkg_resources`` modules' ``Requirement.parse()``
+classmethod.  The only difference between a ``SCRIPT`` wrapper and a
+``DEV-SCRIPT`` is that a ``DEV-SCRIPT`` actually executes the original
+source script in the project's source tree, and is created when the
+"setup.py develop" command is run.  A ``SCRIPT`` wrapper, on the other
+hand, uses the "installed" script written to the ``EGG-INFO/scripts``
+subdirectory of the corresponding ``.egg`` zipfile or directory.
+(``.egg-info`` eggs do not have script wrappers associated with them,
+except in the "setup.py develop" case.)
+
+The purpose of including the marker line in generated script wrappers is
+to facilitate introspection of installed scripts, and their relationship
+to installed eggs.  For example, an uninstallation tool could use this
+data to identify what scripts can safely be removed, and/or identify
+what scripts would stop working if a particular egg is uninstalled.
+
diff --git a/docs/history.txt b/docs/history.txt
new file mode 100644
index 0000000..8fd1dc6
--- /dev/null
+++ b/docs/history.txt
@@ -0,0 +1,46 @@
+:tocdepth: 2
+
+.. _changes:
+
+History
+*******
+
+.. include:: ../CHANGES (links).rst
+
+Credits
+*******
+
+* The original design for the ``.egg`` format and the ``pkg_resources`` API was
+  co-created by Phillip Eby and Bob Ippolito. Bob also implemented the first
+  version of ``pkg_resources``, and supplied the OS X operating system version
+  compatibility algorithm.
+
+* Ian Bicking implemented many early "creature comfort" features of
+  easy_install, including support for downloading via Sourceforge and
+  Subversion repositories. Ian's comments on the Web-SIG about WSGI
+  application deployment also inspired the concept of "entry points" in eggs,
+  and he has given talks at PyCon and elsewhere to inform and educate the
+  community about eggs and setuptools.
+
+* Jim Fulton contributed time and effort to build automated tests of various
+  aspects of ``easy_install``, and supplied the doctests for the command-line
+  ``.exe`` wrappers on Windows.
+
+* Phillip J. Eby is the seminal author of setuptools, and
+  first proposed the idea of an importable binary distribution format for
+  Python application plug-ins.
+
+* Significant parts of the implementation of setuptools were funded by the Open
+  Source Applications Foundation, to provide a plug-in infrastructure for the
+  Chandler PIM application. In addition, many OSAF staffers (such as Mike
+  "Code Bear" Taylor) contributed their time and stress as guinea pigs for the
+  use of eggs and setuptools, even before eggs were "cool".  (Thanks, guys!)
+
+* Tarek Ziadé is the principal author of the Distribute fork, which
+  re-invigorated the community on the project, encouraged renewed innovation,
+  and addressed many defects.
+
+* Since the merge with Distribute, Jason R. Coombs is the
+  maintainer of setuptools. The project is maintained in coordination with
+  the Python Packaging Authority (PyPA) and the larger Python community.
+
diff --git a/docs/index.txt b/docs/index.txt
new file mode 100644
index 0000000..74aabb5
--- /dev/null
+++ b/docs/index.txt
@@ -0,0 +1,25 @@
+Welcome to Setuptools' documentation!
+=====================================
+
+Setuptools is a fully-featured, actively-maintained, and stable library
+designed to facilitate packaging Python projects, where packaging includes:
+
+ - Python package and module definitions
+ - Distribution package metadata
+ - Test hooks
+ - Project installation
+ - Platform-specific details
+ - Python 3 support
+
+Documentation content:
+
+.. toctree::
+   :maxdepth: 2
+
+   setuptools
+   easy_install
+   pkg_resources
+   python3
+   development
+   roadmap
+   history
diff --git a/docs/pkg_resources.txt b/docs/pkg_resources.txt
new file mode 100644
index 0000000..b40a209
--- /dev/null
+++ b/docs/pkg_resources.txt
@@ -0,0 +1,1941 @@
+=============================================================
+Package Discovery and Resource Access using ``pkg_resources``
+=============================================================
+
+The ``pkg_resources`` module distributed with ``setuptools`` provides an API
+for Python libraries to access their resource files, and for extensible
+applications and frameworks to automatically discover plugins.  It also
+provides runtime support for using C extensions that are inside zipfile-format
+eggs, support for merging packages that have separately-distributed modules or
+subpackages, and APIs for managing Python's current "working set" of active
+packages.
+
+
+.. contents:: **Table of Contents**
+
+
+--------
+Overview
+--------
+
+The ``pkg_resources`` module provides runtime facilities for finding,
+introspecting, activating and using installed Python distributions. Some
+of the more advanced features (notably the support for parallel installation
+of multiple versions) rely specifically on the "egg" format (either as a
+zip archive or subdirectory), while others (such as plugin discovery) will
+work correctly so long as "egg-info" metadata directories are available for
+relevant distributions.
+
+Eggs are a distribution format for Python modules, similar in concept to
+Java's "jars" or Ruby's "gems", or the "wheel" format defined in PEP 427.
+However, unlike a pure distribution format, eggs can also be installed and
+added directly to ``sys.path`` as an import location. When installed in
+this way, eggs are *discoverable*, meaning that they carry metadata that
+unambiguously identifies their contents and dependencies. This means that
+an installed egg can be *automatically* found and added to ``sys.path`` in
+response to simple requests of the form, "get me everything I need to use
+docutils' PDF support". This feature allows mutually conflicting versions of
+a distribution to co-exist in the same Python installation, with individual
+applications activating the desired version at runtime by manipulating the
+contents of ``sys.path`` (this differs from the virtual environment
+approach, which involves creating isolated environments for each
+application).
+
+The following terms are needed in order to explain the capabilities offered
+by this module:
+
+project
+    A library, framework, script, plugin, application, or collection of data
+    or other resources, or some combination thereof.  Projects are assumed to
+    have "relatively unique" names, e.g. names registered with PyPI.
+
+release
+    A snapshot of a project at a particular point in time, denoted by a version
+    identifier.
+
+distribution
+    A file or files that represent a particular release.
+
+importable distribution
+    A file or directory that, if placed on ``sys.path``, allows Python to
+    import any modules contained within it.
+
+pluggable distribution
+    An importable distribution whose filename unambiguously identifies its
+    release (i.e. project and version), and whose contents unambiguously
+    specify what releases of other projects will satisfy its runtime
+    requirements.
+
+extra
+    An "extra" is an optional feature of a release, that may impose additional
+    runtime requirements.  For example, if docutils PDF support required a
+    PDF support library to be present, docutils could define its PDF support as
+    an "extra", and list what other project releases need to be available in
+    order to provide it.
+
+environment
+    A collection of distributions potentially available for importing, but not
+    necessarily active.  More than one distribution (i.e. release version) for
+    a given project may be present in an environment.
+
+working set
+    A collection of distributions actually available for importing, as on
+    ``sys.path``.  At most one distribution (release version) of a given
+    project may be present in a working set, as otherwise there would be
+    ambiguity as to what to import.
+
+eggs
+    Eggs are pluggable distributions in one of the three formats currently
+    supported by ``pkg_resources``.  There are built eggs, development eggs,
+    and egg links.  Built eggs are directories or zipfiles whose name ends
+    with ``.egg`` and follows the egg naming conventions, and contain an
+    ``EGG-INFO`` subdirectory (zipped or otherwise).  Development eggs are
+    normal directories of Python code with one or more ``ProjectName.egg-info``
+    subdirectories. The development egg format is also used to provide a
+    default version of a distribution that is available to software that
+    doesn't use ``pkg_resources`` to request specific versions. Egg links
+    are ``*.egg-link`` files that contain the name of a built or
+    development egg, to support symbolic linking on platforms that do not
+    have native symbolic links (or where the symbolic link support is
+    limited).
+
+(For more information about these terms and concepts, see also this
+`architectural overview`_ of ``pkg_resources`` and Python Eggs in general.)
+
+.. _architectural overview: http://mail.python.org/pipermail/distutils-sig/2005-June/004652.html
+
+
+.. -----------------
+.. Developer's Guide
+.. -----------------
+
+.. This section isn't written yet.  Currently planned topics include
+    Accessing Resources
+    Finding and Activating Package Distributions
+        get_provider()
+        require()
+        WorkingSet
+        iter_distributions
+    Running Scripts
+    Configuration
+    Namespace Packages
+    Extensible Applications and Frameworks
+        Locating entry points
+        Activation listeners
+        Metadata access
+        Extended Discovery and Installation
+    Supporting Custom PEP 302 Implementations
+.. For now, please check out the extensive `API Reference`_ below.
+
+
+-------------
+API Reference
+-------------
+
+Namespace Package Support
+=========================
+
+A namespace package is a package that only contains other packages and modules,
+with no direct contents of its own.  Such packages can be split across
+multiple, separately-packaged distributions.  They are normally used to split
+up large packages produced by a single organization, such as in the ``zope``
+namespace package for Zope Corporation packages, and the ``peak`` namespace
+package for the Python Enterprise Application Kit.
+
+To create a namespace package, you list it in the ``namespace_packages``
+argument to ``setup()``, in your project's ``setup.py``.  (See the
+:ref:`setuptools documentation on namespace packages <Namespace Packages>` for
+more information on this.)  Also, you must add a ``declare_namespace()`` call
+in the package's ``__init__.py`` file(s):
+
+``declare_namespace(name)``
+    Declare that the dotted package name `name` is a "namespace package" whose
+    contained packages and modules may be spread across multiple distributions.
+    The named package's ``__path__`` will be extended to include the
+    corresponding package in all distributions on ``sys.path`` that contain a
+    package of that name.  (More precisely, if an importer's
+    ``find_module(name)`` returns a loader, then it will also be searched for
+    the package's contents.)  Whenever a Distribution's ``activate()`` method
+    is invoked, it checks for the presence of namespace packages and updates
+    their ``__path__`` contents accordingly.
+
+Applications that manipulate namespace packages or directly alter ``sys.path``
+at runtime may also need to use this API function:
+
+``fixup_namespace_packages(path_item)``
+    Declare that `path_item` is a newly added item on ``sys.path`` that may
+    need to be used to update existing namespace packages.  Ordinarily, this is
+    called for you when an egg is automatically added to ``sys.path``, but if
+    your application modifies ``sys.path`` to include locations that may
+    contain portions of a namespace package, you will need to call this
+    function to ensure they are added to the existing namespace packages.
+
+Although by default ``pkg_resources`` only supports namespace packages for
+filesystem and zip importers, you can extend its support to other "importers"
+compatible with PEP 302 using the ``register_namespace_handler()`` function.
+See the section below on `Supporting Custom Importers`_ for details.
+
+
+``WorkingSet`` Objects
+======================
+
+The ``WorkingSet`` class provides access to a collection of "active"
+distributions.  In general, there is only one meaningful ``WorkingSet``
+instance: the one that represents the distributions that are currently active
+on ``sys.path``.  This global instance is available under the name
+``working_set`` in the ``pkg_resources`` module.  However, specialized
+tools may wish to manipulate working sets that don't correspond to
+``sys.path``, and therefore may wish to create other ``WorkingSet`` instances.
+
+It's important to note that the global ``working_set`` object is initialized
+from ``sys.path`` when ``pkg_resources`` is first imported, but is only updated
+if you do all future ``sys.path`` manipulation via ``pkg_resources`` APIs.  If
+you manually modify ``sys.path``, you must invoke the appropriate methods on
+the ``working_set`` instance to keep it in sync.  Unfortunately, Python does
+not provide any way to detect arbitrary changes to a list object like
+``sys.path``, so ``pkg_resources`` cannot automatically update the
+``working_set`` based on changes to ``sys.path``.
+
+``WorkingSet(entries=None)``
+    Create a ``WorkingSet`` from an iterable of path entries.  If `entries`
+    is not supplied, it defaults to the value of ``sys.path`` at the time
+    the constructor is called.
+
+    Note that you will not normally construct ``WorkingSet`` instances
+    yourself, but instead you will implicitly or explicitly use the global
+    ``working_set`` instance.  For the most part, the ``pkg_resources`` API
+    is designed so that the ``working_set`` is used by default, such that you
+    don't have to explicitly refer to it most of the time.
+
+All distributions available directly on ``sys.path`` will be activated
+automatically when ``pkg_resources`` is imported. This behaviour can cause
+version conflicts for applications which require non-default versions of
+those distributions. To handle this situation, ``pkg_resources`` checks for a
+``__requires__`` attribute in the ``__main__`` module when initializing the
+default working set, and uses this to ensure a suitable version of each
+affected distribution is activated. For example::
+
+    __requires__ = ["CherryPy < 3"] # Must be set before pkg_resources import
+    import pkg_resources
+
+
+Basic ``WorkingSet`` Methods
+----------------------------
+
+The following methods of ``WorkingSet`` objects are also available as module-
+level functions in ``pkg_resources`` that apply to the default ``working_set``
+instance.  Thus, you can use e.g. ``pkg_resources.require()`` as an
+abbreviation for ``pkg_resources.working_set.require()``:
+
+
+``require(*requirements)``
+    Ensure that distributions matching `requirements` are activated
+
+    `requirements` must be a string or a (possibly-nested) sequence
+    thereof, specifying the distributions and versions required.  The
+    return value is a sequence of the distributions that needed to be
+    activated to fulfill the requirements; all relevant distributions are
+    included, even if they were already activated in this working set.
+
+    For the syntax of requirement specifiers, see the section below on
+    `Requirements Parsing`_.
+
+    In general, it should not be necessary for you to call this method
+    directly.  It's intended more for use in quick-and-dirty scripting and
+    interactive interpreter hacking than for production use. If you're creating
+    an actual library or application, it's strongly recommended that you create
+    a "setup.py" script using ``setuptools``, and declare all your requirements
+    there.  That way, tools like EasyInstall can automatically detect what
+    requirements your package has, and deal with them accordingly.
+
+    Note that calling ``require('SomePackage')`` will not install
+    ``SomePackage`` if it isn't already present.  If you need to do this, you
+    should use the ``resolve()`` method instead, which allows you to pass an
+    ``installer`` callback that will be invoked when a needed distribution
+    can't be found on the local machine.  You can then have this callback
+    display a dialog, automatically download the needed distribution, or
+    whatever else is appropriate for your application. See the documentation
+    below on the ``resolve()`` method for more information, and also on the
+    ``obtain()`` method of ``Environment`` objects.
+
+``run_script(requires, script_name)``
+    Locate distribution specified by `requires` and run its `script_name`
+    script.  `requires` must be a string containing a requirement specifier.
+    (See `Requirements Parsing`_ below for the syntax.)
+
+    The script, if found, will be executed in *the caller's globals*.  That's
+    because this method is intended to be called from wrapper scripts that
+    act as a proxy for the "real" scripts in a distribution.  A wrapper script
+    usually doesn't need to do anything but invoke this function with the
+    correct arguments.
+
+    If you need more control over the script execution environment, you
+    probably want to use the ``run_script()`` method of a ``Distribution``
+    object's `Metadata API`_ instead.
+
+``iter_entry_points(group, name=None)``
+    Yield entry point objects from `group` matching `name`
+
+    If `name` is None, yields all entry points in `group` from all
+    distributions in the working set, otherwise only ones matching both
+    `group` and `name` are yielded.  Entry points are yielded from the active
+    distributions in the order that the distributions appear in the working
+    set.  (For the global ``working_set``, this should be the same as the order
+    that they are listed in ``sys.path``.)  Note that within the entry points
+    advertised by an individual distribution, there is no particular ordering.
+
+    Please see the section below on `Entry Points`_ for more information.
+
+
+``WorkingSet`` Methods and Attributes
+-------------------------------------
+
+These methods are used to query or manipulate the contents of a specific
+working set, so they must be explicitly invoked on a particular ``WorkingSet``
+instance:
+
+``add_entry(entry)``
+    Add a path item to the ``entries``, finding any distributions on it.  You
+    should use this when you add additional items to ``sys.path`` and you want
+    the global ``working_set`` to reflect the change.  This method is also
+    called by the ``WorkingSet()`` constructor during initialization.
+
+    This method uses ``find_distributions(entry, True)`` to find distributions
+    corresponding to the path entry, and then ``add()`` them.  `entry` is
+    always appended to the ``entries`` attribute, even if it is already
+    present, however. (This is because ``sys.path`` can contain the same value
+    more than once, and the ``entries`` attribute should be able to reflect
+    this.)
+
+``__contains__(dist)``
+    True if `dist` is active in this ``WorkingSet``.  Note that only one
+    distribution for a given project can be active in a given ``WorkingSet``.
+
+``__iter__()``
+    Yield distributions for non-duplicate projects in the working set.
+    The yield order is the order in which the items' path entries were
+    added to the working set.
+
+``find(req)``
+    Find a distribution matching `req` (a ``Requirement`` instance).
+    If there is an active distribution for the requested project, this
+    returns it, as long as it meets the version requirement specified by
+    `req`.  But, if there is an active distribution for the project and it
+    does *not* meet the `req` requirement, ``VersionConflict`` is raised.
+    If there is no active distribution for the requested project, ``None``
+    is returned.
+
+``resolve(requirements, env=None, installer=None)``
+    List all distributions needed to (recursively) meet `requirements`
+
+    `requirements` must be a sequence of ``Requirement`` objects.  `env`,
+    if supplied, should be an ``Environment`` instance.  If
+    not supplied, an ``Environment`` is created from the working set's
+    ``entries``.  `installer`, if supplied, will be invoked with each
+    requirement that cannot be met by an already-installed distribution; it
+    should return a ``Distribution`` or ``None``.  (See the ``obtain()`` method
+    of `Environment Objects`_, below, for more information on the `installer`
+    argument.)
+
+``add(dist, entry=None)``
+    Add `dist` to working set, associated with `entry`
+
+    If `entry` is unspecified, it defaults to ``dist.location``.  On exit from
+    this routine, `entry` is added to the end of the working set's ``.entries``
+    (if it wasn't already present).
+
+    `dist` is only added to the working set if it's for a project that
+    doesn't already have a distribution active in the set.  If it's
+    successfully added, any callbacks registered with the ``subscribe()``
+    method will be called.  (See `Receiving Change Notifications`_, below.)
+
+    Note: ``add()`` is automatically called for you by the ``require()``
+    method, so you don't normally need to use this method directly.
+
+``entries``
+    This attribute represents a "shadow" ``sys.path``, primarily useful for
+    debugging.  If you are experiencing import problems, you should check
+    the global ``working_set`` object's ``entries`` against ``sys.path``, to
+    ensure that they match.  If they do not, then some part of your program
+    is manipulating ``sys.path`` without updating the ``working_set``
+    accordingly.  IMPORTANT NOTE: do not directly manipulate this attribute!
+    Setting it equal to ``sys.path`` will not fix your problem, any more than
+    putting black tape over an "engine warning" light will fix your car!  If
+    this attribute is out of sync with ``sys.path``, it's merely an *indicator*
+    of the problem, not the cause of it.
+
+
+Receiving Change Notifications
+------------------------------
+
+Extensible applications and frameworks may need to receive notification when
+a new distribution (such as a plug-in component) has been added to a working
+set.  This is what the ``subscribe()`` method and ``add_activation_listener()``
+function are for.
+
+``subscribe(callback)``
+    Invoke ``callback(distribution)`` once for each active distribution that is
+    in the set now, or gets added later.  Because the callback is invoked for
+    already-active distributions, you do not need to loop over the working set
+    yourself to deal with the existing items; just register the callback and
+    be prepared for the fact that it will be called immediately by this method.
+
+    Note that callbacks *must not* allow exceptions to propagate, or they will
+    interfere with the operation of other callbacks and possibly result in an
+    inconsistent working set state.  Callbacks should use a try/except block
+    to ignore, log, or otherwise process any errors, especially since the code
+    that caused the callback to be invoked is unlikely to be able to handle
+    the errors any better than the callback itself.
+
+``pkg_resources.add_activation_listener()`` is an alternate spelling of
+``pkg_resources.working_set.subscribe()``.
+
+
+Locating Plugins
+----------------
+
+Extensible applications will sometimes have a "plugin directory" or a set of
+plugin directories, from which they want to load entry points or other
+metadata.  The ``find_plugins()`` method allows you to do this, by scanning an
+environment for the newest version of each project that can be safely loaded
+without conflicts or missing requirements.
+
+``find_plugins(plugin_env, full_env=None, fallback=True)``
+   Scan `plugin_env` and identify which distributions could be added to this
+   working set without version conflicts or missing requirements.
+
+   Example usage::
+
+       distributions, errors = working_set.find_plugins(
+           Environment(plugin_dirlist)
+       )
+       map(working_set.add, distributions)  # add plugins+libs to sys.path
+       print "Couldn't load", errors        # display errors
+
+   The `plugin_env` should be an ``Environment`` instance that contains only
+   distributions that are in the project's "plugin directory" or directories.
+   The `full_env`, if supplied, should be an ``Environment`` instance that
+   contains all currently-available distributions.
+
+   If `full_env` is not supplied, one is created automatically from the
+   ``WorkingSet`` this method is called on, which will typically mean that
+   every directory on ``sys.path`` will be scanned for distributions.
+
+   This method returns a 2-tuple: (`distributions`, `error_info`), where
+   `distributions` is a list of the distributions found in `plugin_env` that
+   were loadable, along with any other distributions that are needed to resolve
+   their dependencies.  `error_info` is a dictionary mapping unloadable plugin
+   distributions to an exception instance describing the error that occurred.
+   Usually this will be a ``DistributionNotFound`` or ``VersionConflict``
+   instance.
+
+   Most applications will use this method mainly on the master ``working_set``
+   instance in ``pkg_resources``, and then immediately add the returned
+   distributions to the working set so that they are available on sys.path.
+   This will make it possible to find any entry points, and allow any other
+   metadata tracking and hooks to be activated.
+
+   The resolution algorithm used by ``find_plugins()`` is as follows.  First,
+   the project names of the distributions present in `plugin_env` are sorted.
+   Then, each project's eggs are tried in descending version order (i.e.,
+   newest version first).
+
+   An attempt is made to resolve each egg's dependencies. If the attempt is
+   successful, the egg and its dependencies are added to the output list and to
+   a temporary copy of the working set.  The resolution process continues with
+   the next project name, and no older eggs for that project are tried.
+
+   If the resolution attempt fails, however, the error is added to the error
+   dictionary.  If the `fallback` flag is true, the next older version of the
+   plugin is tried, until a working version is found.  If false, the resolution
+   process continues with the next plugin project name.
+
+   Some applications may have stricter fallback requirements than others. For
+   example, an application that has a database schema or persistent objects
+   may not be able to safely downgrade a version of a package. Others may want
+   to ensure that a new plugin configuration is either 100% good or else
+   revert to a known-good configuration.  (That is, they may wish to revert to
+   a known configuration if the `error_info` return value is non-empty.)
+
+   Note that this algorithm gives precedence to satisfying the dependencies of
+   alphabetically prior project names in case of version conflicts. If two
+   projects named "AaronsPlugin" and "ZekesPlugin" both need different versions
+   of "TomsLibrary", then "AaronsPlugin" will win and "ZekesPlugin" will be
+   disabled due to version conflict.
+
+
+``Environment`` Objects
+=======================
+
+An "environment" is a collection of ``Distribution`` objects, usually ones
+that are present and potentially importable on the current platform.
+``Environment`` objects are used by ``pkg_resources`` to index available
+distributions during dependency resolution.
+
+``Environment(search_path=None, platform=get_supported_platform(), python=PY_MAJOR)``
+    Create an environment snapshot by scanning `search_path` for distributions
+    compatible with `platform` and `python`.  `search_path` should be a
+    sequence of strings such as might be used on ``sys.path``.  If a
+    `search_path` isn't supplied, ``sys.path`` is used.
+
+    `platform` is an optional string specifying the name of the platform
+    that platform-specific distributions must be compatible with.  If
+    unspecified, it defaults to the current platform.  `python` is an
+    optional string naming the desired version of Python (e.g. ``'2.4'``);
+    it defaults to the currently-running version.
+
+    You may explicitly set `platform` (and/or `python`) to ``None`` if you
+    wish to include *all* distributions, not just those compatible with the
+    running platform or Python version.
+
+    Note that `search_path` is scanned immediately for distributions, and the
+    resulting ``Environment`` is a snapshot of the found distributions.  It
+    is not automatically updated if the system's state changes due to e.g.
+    installation or removal of distributions.
+
+``__getitem__(project_name)``
+    Returns a list of distributions for the given project name, ordered
+    from newest to oldest version.  (And highest to lowest format precedence
+    for distributions that contain the same version of the project.)  If there
+    are no distributions for the project, returns an empty list.
+
+``__iter__()``
+    Yield the unique project names of the distributions in this environment.
+    The yielded names are always in lower case.
+
+``add(dist)``
+    Add `dist` to the environment if it matches the platform and python version
+    specified at creation time, and only if the distribution hasn't already
+    been added. (i.e., adding the same distribution more than once is a no-op.)
+
+``remove(dist)``
+    Remove `dist` from the environment.
+
+``can_add(dist)``
+    Is distribution `dist` acceptable for this environment?  If it's not
+    compatible with the ``platform`` and ``python`` version values specified
+    when the environment was created, a false value is returned.
+
+``__add__(dist_or_env)``  (``+`` operator)
+    Add a distribution or environment to an ``Environment`` instance, returning
+    a *new* environment object that contains all the distributions previously
+    contained by both.  The new environment will have a ``platform`` and
+    ``python`` of ``None``, meaning that it will not reject any distributions
+    from being added to it; it will simply accept whatever is added.  If you
+    want the added items to be filtered for platform and Python version, or
+    you want to add them to the *same* environment instance, you should use
+    in-place addition (``+=``) instead.
+
+``__iadd__(dist_or_env)``  (``+=`` operator)
+    Add a distribution or environment to an ``Environment`` instance
+    *in-place*, updating the existing instance and returning it.  The
+    ``platform`` and ``python`` filter attributes take effect, so distributions
+    in the source that do not have a suitable platform string or Python version
+    are silently ignored.
+
+``best_match(req, working_set, installer=None)``
+    Find distribution best matching `req` and usable on `working_set`
+
+    This calls the ``find(req)`` method of the `working_set` to see if a
+    suitable distribution is already active.  (This may raise
+    ``VersionConflict`` if an unsuitable version of the project is already
+    active in the specified `working_set`.)  If a suitable distribution isn't
+    active, this method returns the newest distribution in the environment
+    that meets the ``Requirement`` in `req`.  If no suitable distribution is
+    found, and `installer` is supplied, then the result of calling
+    the environment's ``obtain(req, installer)`` method will be returned.
+
+``obtain(requirement, installer=None)``
+    Obtain a distro that matches requirement (e.g. via download).  In the
+    base ``Environment`` class, this routine just returns
+    ``installer(requirement)``, unless `installer` is None, in which case
+    None is returned instead.  This method is a hook that allows subclasses
+    to attempt other ways of obtaining a distribution before falling back
+    to the `installer` argument.
+
+``scan(search_path=None)``
+    Scan `search_path` for distributions usable on `platform`
+
+    Any distributions found are added to the environment.  `search_path` should
+    be a sequence of strings such as might be used on ``sys.path``.  If not
+    supplied, ``sys.path`` is used.  Only distributions conforming to
+    the platform/python version defined at initialization are added.  This
+    method is a shortcut for using the ``find_distributions()`` function to
+    find the distributions from each item in `search_path`, and then calling
+    ``add()`` to add each one to the environment.
+
+
+``Requirement`` Objects
+=======================
+
+``Requirement`` objects express what versions of a project are suitable for
+some purpose.  These objects (or their string form) are used by various
+``pkg_resources`` APIs in order to find distributions that a script or
+distribution needs.
+
+
+Requirements Parsing
+--------------------
+
+``parse_requirements(s)``
+    Yield ``Requirement`` objects for a string or iterable of lines.  Each
+    requirement must start on a new line.  See below for syntax.
+
+``Requirement.parse(s)``
+    Create a ``Requirement`` object from a string or iterable of lines.  A
+    ``ValueError`` is raised if the string or lines do not contain a valid
+    requirement specifier, or if they contain more than one specifier.  (To
+    parse multiple specifiers from a string or iterable of strings, use
+    ``parse_requirements()`` instead.)
+
+    The syntax of a requirement specifier is defined in full in PEP 508.
+
+    Some examples of valid requirement specifiers::
+
+        FooProject >= 1.2
+        Fizzy [foo, bar]
+        PickyThing<1.6,>1.9,!=1.9.6,<2.0a0,==2.4c1
+        SomethingWhoseVersionIDontCareAbout
+        SomethingWithMarker[foo]>1.0;python_version<"2.7"
+
+    The project name is the only required portion of a requirement string, and
+    if it's the only thing supplied, the requirement will accept any version
+    of that project.
+
+    The "extras" in a requirement are used to request optional features of a
+    project, that may require additional project distributions in order to
+    function.  For example, if the hypothetical "Report-O-Rama" project offered
+    optional PDF support, it might require an additional library in order to
+    provide that support.  Thus, a project needing Report-O-Rama's PDF features
+    could use a requirement of ``Report-O-Rama[PDF]`` to request installation
+    or activation of both Report-O-Rama and any libraries it needs in order to
+    provide PDF support.  For example, you could use::
+
+        easy_install.py Report-O-Rama[PDF]
+
+    To install the necessary packages using the EasyInstall program, or call
+    ``pkg_resources.require('Report-O-Rama[PDF]')`` to add the necessary
+    distributions to sys.path at runtime.
+
+    The "markers" in a requirement are used to specify when a requirement
+    should be installed -- the requirement will be installed if the marker
+    evaluates as true in the current environment. For example, specifying
+    ``argparse;python_version<"3.0"`` will not install in a Python 3
+    environment, but will in a Python 2 environment.
+
+``Requirement`` Methods and Attributes
+--------------------------------------
+
+``__contains__(dist_or_version)``
+    Return true if `dist_or_version` fits the criteria for this requirement.
+    If `dist_or_version` is a ``Distribution`` object, its project name must
+    match the requirement's project name, and its version must meet the
+    requirement's version criteria.  If `dist_or_version` is a string, it is
+    parsed using the ``parse_version()`` utility function.  Otherwise, it is
+    assumed to be an already-parsed version.
+
+    The ``Requirement`` object's version specifiers (``.specs``) are internally
+    sorted into ascending version order, and used to establish what ranges of
+    versions are acceptable.  Adjacent redundant conditions are effectively
+    consolidated (e.g. ``">1, >2"`` produces the same results as ``">2"``, and
+    ``"<2,<3"`` produces the same results as ``"<2"``). ``"!="`` versions are
+    excised from the ranges they fall within.  The version being tested for
+    acceptability is then checked for membership in the resulting ranges.
+
+``__eq__(other_requirement)``
+    A requirement compares equal to another requirement if they have
+    case-insensitively equal project names, version specifiers, and "extras".
+    (The order that extras and version specifiers are in is also ignored.)
+    Equal requirements also have equal hashes, so that requirements can be
+    used in sets or as dictionary keys.
+
+``__str__()``
+    The string form of a ``Requirement`` is a string that, if passed to
+    ``Requirement.parse()``, would return an equal ``Requirement`` object.
+
+``project_name``
+    The name of the required project
+
+``key``
+    An all-lowercase version of the ``project_name``, useful for comparison
+    or indexing.
+
+``extras``
+    A tuple of names of "extras" that this requirement calls for.  (These will
+    be all-lowercase and normalized using the ``safe_extra()`` parsing utility
+    function, so they may not exactly equal the extras the requirement was
+    created with.)
+
+``specs``
+    A list of ``(op,version)`` tuples, sorted in ascending parsed-version
+    order.  The `op` in each tuple is a comparison operator, represented as
+    a string.  The `version` is the (unparsed) version number.
+
+``marker``
+    An instance of ``packaging.markers.Marker`` that allows evaluation
+    against the current environment. May be None if no marker specified.
+
+``url``
+    The location to download the requirement from if specified.
+
+Entry Points
+============
+
+Entry points are a simple way for distributions to "advertise" Python objects
+(such as functions or classes) for use by other distributions.  Extensible
+applications and frameworks can search for entry points with a particular name
+or group, either from a specific distribution or from all active distributions
+on sys.path, and then inspect or load the advertised objects at will.
+
+Entry points belong to "groups" which are named with a dotted name similar to
+a Python package or module name.  For example, the ``setuptools`` package uses
+an entry point named ``distutils.commands`` in order to find commands defined
+by distutils extensions.  ``setuptools`` treats the names of entry points
+defined in that group as the acceptable commands for a setup script.
+
+In a similar way, other packages can define their own entry point groups,
+either using dynamic names within the group (like ``distutils.commands``), or
+possibly using predefined names within the group.  For example, a blogging
+framework that offers various pre- or post-publishing hooks might define an
+entry point group and look for entry points named "pre_process" and
+"post_process" within that group.
+
+To advertise an entry point, a project needs to use ``setuptools`` and provide
+an ``entry_points`` argument to ``setup()`` in its setup script, so that the
+entry points will be included in the distribution's metadata.  For more
+details, see the ``setuptools`` documentation.  (XXX link here to setuptools)
+
+Each project distribution can advertise at most one entry point of a given
+name within the same entry point group.  For example, a distutils extension
+could advertise two different ``distutils.commands`` entry points, as long as
+they had different names.  However, there is nothing that prevents *different*
+projects from advertising entry points of the same name in the same group.  In
+some cases, this is a desirable thing, since the application or framework that
+uses the entry points may be calling them as hooks, or in some other way
+combining them.  It is up to the application or framework to decide what to do
+if multiple distributions advertise an entry point; some possibilities include
+using both entry points, displaying an error message, using the first one found
+in sys.path order, etc.
+
+
+Convenience API
+---------------
+
+In the following functions, the `dist` argument can be a ``Distribution``
+instance, a ``Requirement`` instance, or a string specifying a requirement
+(i.e. project name, version, etc.).  If the argument is a string or
+``Requirement``, the specified distribution is located (and added to sys.path
+if not already present).  An error will be raised if a matching distribution is
+not available.
+
+The `group` argument should be a string containing a dotted identifier,
+identifying an entry point group.  If you are defining an entry point group,
+you should include some portion of your package's name in the group name so as
+to avoid collision with other packages' entry point groups.
+
+``load_entry_point(dist, group, name)``
+    Load the named entry point from the specified distribution, or raise
+    ``ImportError``.
+
+``get_entry_info(dist, group, name)``
+    Return an ``EntryPoint`` object for the given `group` and `name` from
+    the specified distribution.  Returns ``None`` if the distribution has not
+    advertised a matching entry point.
+
+``get_entry_map(dist, group=None)``
+    Return the distribution's entry point map for `group`, or the full entry
+    map for the distribution.  This function always returns a dictionary,
+    even if the distribution advertises no entry points.  If `group` is given,
+    the dictionary maps entry point names to the corresponding ``EntryPoint``
+    object.  If `group` is None, the dictionary maps group names to
+    dictionaries that then map entry point names to the corresponding
+    ``EntryPoint`` instance in that group.
+
+``iter_entry_points(group, name=None)``
+    Yield entry point objects from `group` matching `name`.
+
+    If `name` is None, yields all entry points in `group` from all
+    distributions in the working set on sys.path, otherwise only ones matching
+    both `group` and `name` are yielded.  Entry points are yielded from
+    the active distributions in the order that the distributions appear on
+    sys.path.  (Within entry points for a particular distribution, however,
+    there is no particular ordering.)
+
+    (This API is actually a method of the global ``working_set`` object; see
+    the section above on `Basic WorkingSet Methods`_ for more information.)
+
+
+Creating and Parsing
+--------------------
+
+``EntryPoint(name, module_name, attrs=(), extras=(), dist=None)``
+    Create an ``EntryPoint`` instance.  `name` is the entry point name.  The
+    `module_name` is the (dotted) name of the module containing the advertised
+    object.  `attrs` is an optional tuple of names to look up from the
+    module to obtain the advertised object.  For example, an `attrs` of
+    ``("foo","bar")`` and a `module_name` of ``"baz"`` would mean that the
+    advertised object could be obtained by the following code::
+
+        import baz
+        advertised_object = baz.foo.bar
+
+    The `extras` are an optional tuple of "extra feature" names that the
+    distribution needs in order to provide this entry point.  When the
+    entry point is loaded, these extra features are looked up in the `dist`
+    argument to find out what other distributions may need to be activated
+    on sys.path; see the ``load()`` method for more details.  The `extras`
+    argument is only meaningful if `dist` is specified.  `dist` must be
+    a ``Distribution`` instance.
+
+``EntryPoint.parse(src, dist=None)`` (classmethod)
+    Parse a single entry point from string `src`
+
+    Entry point syntax follows the form::
+
+        name = some.module:some.attr [extra1,extra2]
+
+    The entry name and module name are required, but the ``:attrs`` and
+    ``[extras]`` parts are optional, as is the whitespace shown between
+    some of the items.  The `dist` argument is passed through to the
+    ``EntryPoint()`` constructor, along with the other values parsed from
+    `src`.
+
+``EntryPoint.parse_group(group, lines, dist=None)`` (classmethod)
+    Parse `lines` (a string or sequence of lines) to create a dictionary
+    mapping entry point names to ``EntryPoint`` objects.  ``ValueError`` is
+    raised if entry point names are duplicated, if `group` is not a valid
+    entry point group name, or if there are any syntax errors.  (Note: the
+    `group` parameter is used only for validation and to create more
+    informative error messages.)  If `dist` is provided, it will be used to
+    set the ``dist`` attribute of the created ``EntryPoint`` objects.
+
+``EntryPoint.parse_map(data, dist=None)`` (classmethod)
+    Parse `data` into a dictionary mapping group names to dictionaries mapping
+    entry point names to ``EntryPoint`` objects.  If `data` is a dictionary,
+    then the keys are used as group names and the values are passed to
+    ``parse_group()`` as the `lines` argument.  If `data` is a string or
+    sequence of lines, it is first split into .ini-style sections (using
+    the ``split_sections()`` utility function) and the section names are used
+    as group names.  In either case, the `dist` argument is passed through to
+    ``parse_group()`` so that the entry points will be linked to the specified
+    distribution.
+
+
+``EntryPoint`` Objects
+----------------------
+
+For simple introspection, ``EntryPoint`` objects have attributes that
+correspond exactly to the constructor argument names: ``name``,
+``module_name``, ``attrs``, ``extras``, and ``dist`` are all available.  In
+addition, the following methods are provided:
+
+``load()``
+    Load the entry point, returning the advertised Python object.  Effectively
+    calls ``self.require()`` then returns ``self.resolve()``.
+
+``require(env=None, installer=None)``
+    Ensure that any "extras" needed by the entry point are available on
+    sys.path.  ``UnknownExtra`` is raised if the ``EntryPoint`` has ``extras``,
+    but no ``dist``, or if the named extras are not defined by the
+    distribution.  If `env` is supplied, it must be an ``Environment``, and it
+    will be used to search for needed distributions if they are not already
+    present on sys.path.  If `installer` is supplied, it must be a callable
+    taking a ``Requirement`` instance and returning a matching importable
+    ``Distribution`` instance or None.
+
+``resolve()``
+    Resolve the entry point from its module and attrs, returning the advertised
+    Python object. Raises ``ImportError`` if it cannot be obtained.
+
+``__str__()``
+    The string form of an ``EntryPoint`` is a string that could be passed to
+    ``EntryPoint.parse()`` to produce an equivalent ``EntryPoint``.
+
+
+``Distribution`` Objects
+========================
+
+``Distribution`` objects represent collections of Python code that may or may
+not be importable, and may or may not have metadata and resources associated
+with them.  Their metadata may include information such as what other projects
+the distribution depends on, what entry points the distribution advertises, and
+so on.
+
+
+Getting or Creating Distributions
+---------------------------------
+
+Most commonly, you'll obtain ``Distribution`` objects from a ``WorkingSet`` or
+an ``Environment``.  (See the sections above on `WorkingSet Objects`_ and
+`Environment Objects`_, which are containers for active distributions and
+available distributions, respectively.)  You can also obtain ``Distribution``
+objects from one of these high-level APIs:
+
+``find_distributions(path_item, only=False)``
+    Yield distributions accessible via `path_item`.  If `only` is true, yield
+    only distributions whose ``location`` is equal to `path_item`.  In other
+    words, if `only` is true, this yields any distributions that would be
+    importable if `path_item` were on ``sys.path``.  If `only` is false, this
+    also yields distributions that are "in" or "under" `path_item`, but would
+    not be importable unless their locations were also added to ``sys.path``.
+
+``get_distribution(dist_spec)``
+    Return a ``Distribution`` object for a given ``Requirement`` or string.
+    If `dist_spec` is already a ``Distribution`` instance, it is returned.
+    If it is a ``Requirement`` object or a string that can be parsed into one,
+    it is used to locate and activate a matching distribution, which is then
+    returned.
+
+However, if you're creating specialized tools for working with distributions,
+or creating a new distribution format, you may also need to create
+``Distribution`` objects directly, using one of the three constructors below.
+
+These constructors all take an optional `metadata` argument, which is used to
+access any resources or metadata associated with the distribution.  `metadata`
+must be an object that implements the ``IResourceProvider`` interface, or None.
+If it is None, an ``EmptyProvider`` is used instead.  ``Distribution`` objects
+implement both the `IResourceProvider`_ and `IMetadataProvider Methods`_ by
+delegating them to the `metadata` object.
+
+``Distribution.from_location(location, basename, metadata=None, **kw)`` (classmethod)
+    Create a distribution for `location`, which must be a string such as a
+    URL, filename, or other string that might be used on ``sys.path``.
+    `basename` is a string naming the distribution, like ``Foo-1.2-py2.4.egg``.
+    If `basename` ends with ``.egg``, then the project's name, version, python
+    version and platform are extracted from the filename and used to set those
+    properties of the created distribution.  Any additional keyword arguments
+    are forwarded to the ``Distribution()`` constructor.
+
+``Distribution.from_filename(filename, metadata=None, **kw)`` (classmethod)
+    Create a distribution by parsing a local filename.  This is a shorter way
+    of saying ``Distribution.from_location(normalize_path(filename),
+    os.path.basename(filename), metadata)``.  In other words, it creates a
+    distribution whose location is the normalized form of the filename, parsing
+    name and version information from the base portion of the filename.  Any
+    additional keyword arguments are forwarded to the ``Distribution()``
+    constructor.
+
+``Distribution(location,metadata,project_name,version,py_version,platform,precedence)``
+    Create a distribution by setting its properties.  All arguments are
+    optional and default to None, except for `py_version` (which defaults to
+    the current Python version) and `precedence` (which defaults to
+    ``EGG_DIST``; for more details see ``precedence`` under `Distribution
+    Attributes`_ below).  Note that it's usually easier to use the
+    ``from_filename()`` or ``from_location()`` constructors than to specify
+    all these arguments individually.
+
+
+``Distribution`` Attributes
+---------------------------
+
+location
+    A string indicating the distribution's location.  For an importable
+    distribution, this is the string that would be added to ``sys.path`` to
+    make it actively importable.  For non-importable distributions, this is
+    simply a filename, URL, or other way of locating the distribution.
+
+project_name
+    A string, naming the project that this distribution is for.  Project names
+    are defined by a project's setup script, and they are used to identify
+    projects on PyPI.  When a ``Distribution`` is constructed, the
+    `project_name` argument is passed through the ``safe_name()`` utility
+    function to filter out any unacceptable characters.
+
+key
+    ``dist.key`` is short for ``dist.project_name.lower()``.  It's used for
+    case-insensitive comparison and indexing of distributions by project name.
+
+extras
+    A list of strings, giving the names of extra features defined by the
+    project's dependency list (the ``extras_require`` argument specified in
+    the project's setup script).
+
+version
+    A string denoting what release of the project this distribution contains.
+    When a ``Distribution`` is constructed, the `version` argument is passed
+    through the ``safe_version()`` utility function to filter out any
+    unacceptable characters.  If no `version` is specified at construction
+    time, then attempting to access this attribute later will cause the
+    ``Distribution`` to try to discover its version by reading its ``PKG-INFO``
+    metadata file.  If ``PKG-INFO`` is unavailable or can't be parsed,
+    ``ValueError`` is raised.
+
+parsed_version
+    The ``parsed_version`` is an object representing a "parsed" form of the
+    distribution's ``version``.  ``dist.parsed_version`` is a shortcut for
+    calling ``parse_version(dist.version)``.  It is used to compare or sort
+    distributions by version.  (See the `Parsing Utilities`_ section below for
+    more information on the ``parse_version()`` function.)  Note that accessing
+    ``parsed_version`` may result in a ``ValueError`` if the ``Distribution``
+    was constructed without a `version` and without `metadata` capable of
+    supplying the missing version info.
+
+py_version
+    The major/minor Python version the distribution supports, as a string.
+    For example, "2.7" or "3.4".  The default is the current version of Python.
+
+platform
+    A string representing the platform the distribution is intended for, or
+    ``None`` if the distribution is "pure Python" and therefore cross-platform.
+    See `Platform Utilities`_ below for more information on platform strings.
+
+precedence
+    A distribution's ``precedence`` is used to determine the relative order of
+    two distributions that have the same ``project_name`` and
+    ``parsed_version``.  The default precedence is ``pkg_resources.EGG_DIST``,
+    which is the highest (i.e. most preferred) precedence.  The full list
+    of predefined precedences, from most preferred to least preferred, is:
+    ``EGG_DIST``, ``BINARY_DIST``, ``SOURCE_DIST``, ``CHECKOUT_DIST``, and
+    ``DEVELOP_DIST``.  Normally, precedences other than ``EGG_DIST`` are used
+    only by the ``setuptools.package_index`` module, when sorting distributions
+    found in a package index to determine their suitability for installation.
+    "System" and "Development" eggs (i.e., ones that use the ``.egg-info``
+    format), however, are automatically given a precedence of ``DEVELOP_DIST``.
+
+
+
+``Distribution`` Methods
+------------------------
+
+``activate(path=None)``
+    Ensure distribution is importable on `path`.  If `path` is None,
+    ``sys.path`` is used instead.  This ensures that the distribution's
+    ``location`` is in the `path` list, and it also performs any necessary
+    namespace package fixups or declarations.  (That is, if the distribution
+    contains namespace packages, this method ensures that they are declared,
+    and that the distribution's contents for those namespace packages are
+    merged with the contents provided by any other active distributions.  See
+    the section above on `Namespace Package Support`_ for more information.)
+
+    ``pkg_resources`` adds a notification callback to the global ``working_set``
+    that ensures this method is called whenever a distribution is added to it.
+    Therefore, you should not normally need to explicitly call this method.
+    (Note that this means that namespace packages on ``sys.path`` are always
+    imported as soon as ``pkg_resources`` is, which is another reason why
+    namespace packages should not contain any code or import statements.)
+
+``as_requirement()``
+    Return a ``Requirement`` instance that matches this distribution's project
+    name and version.
+
+``requires(extras=())``
+    List the ``Requirement`` objects that specify this distribution's
+    dependencies.  If `extras` is specified, it should be a sequence of names
+    of "extras" defined by the distribution, and the list returned will then
+    include any dependencies needed to support the named "extras".
+
+``clone(**kw)``
+    Create a copy of the distribution.  Any supplied keyword arguments override
+    the corresponding argument to the ``Distribution()`` constructor, allowing
+    you to change some of the copied distribution's attributes.
+
+``egg_name()``
+    Return what this distribution's standard filename should be, not including
+    the ".egg" extension.  For example, a distribution for project "Foo"
+    version 1.2 that runs on Python 2.3 for Windows would have an ``egg_name()``
+    of ``Foo-1.2-py2.3-win32``.  Any dashes in the name or version are
+    converted to underscores.  (``Distribution.from_location()`` will convert
+    them back when parsing a ".egg" file name.)
+
+``__cmp__(other)``, ``__hash__()``
+    Distribution objects are hashed and compared on the basis of their parsed
+    version and precedence, followed by their key (lowercase project name),
+    location, Python version, and platform.
+
+The following methods are used to access ``EntryPoint`` objects advertised
+by the distribution.  See the section above on `Entry Points`_ for more
+detailed information about these operations:
+
+``get_entry_info(group, name)``
+    Return the ``EntryPoint`` object for `group` and `name`, or None if no
+    such point is advertised by this distribution.
+
+``get_entry_map(group=None)``
+    Return the entry point map for `group`.  If `group` is None, return
+    a dictionary mapping group names to entry point maps for all groups.
+    (An entry point map is a dictionary of entry point names to ``EntryPoint``
+    objects.)
+
+``load_entry_point(group, name)``
+    Short for ``get_entry_info(group, name).load()``.  Returns the object
+    advertised by the named entry point, or raises ``ImportError`` if
+    the entry point isn't advertised by this distribution, or there is some
+    other import problem.
+
+In addition to the above methods, ``Distribution`` objects also implement all
+of the `IResourceProvider`_ and `IMetadataProvider Methods`_ (which are
+documented in later sections):
+
+* ``has_metadata(name)``
+* ``metadata_isdir(name)``
+* ``metadata_listdir(name)``
+* ``get_metadata(name)``
+* ``get_metadata_lines(name)``
+* ``run_script(script_name, namespace)``
+* ``get_resource_filename(manager, resource_name)``
+* ``get_resource_stream(manager, resource_name)``
+* ``get_resource_string(manager, resource_name)``
+* ``has_resource(resource_name)``
+* ``resource_isdir(resource_name)``
+* ``resource_listdir(resource_name)``
+
+If the distribution was created with a `metadata` argument, these resource and
+metadata access methods are all delegated to that `metadata` provider.
+Otherwise, they are delegated to an ``EmptyProvider``, so that the distribution
+will appear to have no resources or metadata.  This delegation approach is used
+so that supporting custom importers or new distribution formats can be done
+simply by creating an appropriate `IResourceProvider`_ implementation; see the
+section below on `Supporting Custom Importers`_ for more details.
+
+
+``ResourceManager`` API
+=======================
+
+The ``ResourceManager`` class provides uniform access to package resources,
+whether those resources exist as files and directories or are compressed in
+an archive of some kind.
+
+Normally, you do not need to create or explicitly manage ``ResourceManager``
+instances, as the ``pkg_resources`` module creates a global instance for you,
+and makes most of its methods available as top-level names in the
+``pkg_resources`` module namespace.  So, for example, this code actually
+calls the ``resource_string()`` method of the global ``ResourceManager``::
+
+    import pkg_resources
+    my_data = pkg_resources.resource_string(__name__, "foo.dat")
+
+Thus, you can use the APIs below without needing an explicit
+``ResourceManager`` instance; just import and use them as needed.
+
+
+Basic Resource Access
+---------------------
+
+In the following methods, the `package_or_requirement` argument may be either
+a Python package/module name (e.g. ``foo.bar``) or a ``Requirement`` instance.
+If it is a package or module name, the named module or package must be
+importable (i.e., be in a distribution or directory on ``sys.path``), and the
+`resource_name` argument is interpreted relative to the named package.  (Note
+that if a module name is used, then the resource name is relative to the
+    package immediately containing the named module.  Also, you should not use
+a namespace package name, because a namespace package can be spread across
+multiple distributions, and is therefore ambiguous as to which distribution
+should be searched for the resource.)
+
+If it is a ``Requirement``, then the requirement is automatically resolved
+(searching the current ``Environment`` if necessary) and a matching
+distribution is added to the ``WorkingSet`` and ``sys.path`` if one was not
+already present.  (Unless the ``Requirement`` can't be satisfied, in which
+case an exception is raised.)  The `resource_name` argument is then interpreted
+relative to the root of the identified distribution; i.e. its first path
+segment will be treated as a peer of the top-level modules or packages in the
+distribution.
+
+Note that resource names must be ``/``-separated paths and cannot be absolute
+(i.e. no leading ``/``) or contain relative names like ``".."``.  Do *not* use
+``os.path`` routines to manipulate resource paths, as they are *not* filesystem
+paths.
+
+``resource_exists(package_or_requirement, resource_name)``
+    Does the named resource exist?  Return ``True`` or ``False`` accordingly.
+
+``resource_stream(package_or_requirement, resource_name)``
+    Return a readable file-like object for the specified resource; it may be
+    an actual file, a ``StringIO``, or some similar object.  The stream is
+    in "binary mode", in the sense that whatever bytes are in the resource
+    will be read as-is.
+
+``resource_string(package_or_requirement, resource_name)``
+    Return the specified resource as a string.  The resource is read in
+    binary fashion, such that the returned string contains exactly the bytes
+    that are stored in the resource.
+
+``resource_isdir(package_or_requirement, resource_name)``
+    Is the named resource a directory?  Return ``True`` or ``False``
+    accordingly.
+
+``resource_listdir(package_or_requirement, resource_name)``
+    List the contents of the named resource directory, just like ``os.listdir``
+    except that it works even if the resource is in a zipfile.
+
+Note that only ``resource_exists()`` and ``resource_isdir()`` are insensitive
+to the resource type.  You cannot use ``resource_listdir()`` on a file
+resource, and you can't use ``resource_string()`` or ``resource_stream()`` on
+directory resources.  Using an inappropriate method for the resource type may
+result in an exception or undefined behavior, depending on the platform and
+distribution format involved.
+
+
+Resource Extraction
+-------------------
+
+``resource_filename(package_or_requirement, resource_name)``
+    Sometimes, it is not sufficient to access a resource in string or stream
+    form, and a true filesystem filename is needed.  In such cases, you can
+    use this method (or module-level function) to obtain a filename for a
+    resource.  If the resource is in an archive distribution (such as a zipped
+    egg), it will be extracted to a cache directory, and the filename within
+    the cache will be returned.  If the named resource is a directory, then
+    all resources within that directory (including subdirectories) are also
+    extracted.  If the named resource is a C extension or "eager resource"
+    (see the ``setuptools`` documentation for details), then all C extensions
+    and eager resources are extracted at the same time.
+
+    Archived resources are extracted to a cache location that can be managed by
+    the following two methods:
+
+``set_extraction_path(path)``
+    Set the base path where resources will be extracted to, if needed.
+
+    If you do not call this routine before any extractions take place, the
+    path defaults to the return value of ``get_default_cache()``.  (Which is
+    based on the ``PYTHON_EGG_CACHE`` environment variable, with various
+    platform-specific fallbacks.  See that routine's documentation for more
+    details.)
+
+    Resources are extracted to subdirectories of this path based upon
+    information given by the resource provider.  You may set this to a
+    temporary directory, but then you must call ``cleanup_resources()`` to
+    delete the extracted files when done.  There is no guarantee that
+    ``cleanup_resources()`` will be able to remove all extracted files.  (On
+    Windows, for example, you can't unlink .pyd or .dll files that are still
+    in use.)
+
+    Note that you may not change the extraction path for a given resource
+    manager once resources have been extracted, unless you first call
+    ``cleanup_resources()``.
+
+``cleanup_resources(force=False)``
+    Delete all extracted resource files and directories, returning a list
+    of the file and directory names that could not be successfully removed.
+    This function does not have any concurrency protection, so it should
+    generally only be called when the extraction path is a temporary
+    directory exclusive to a single process.  This method is not
+    automatically called; you must call it explicitly or register it as an
+    ``atexit`` function if you wish to ensure cleanup of a temporary
+    directory used for extractions.
+
+
+"Provider" Interface
+--------------------
+
+If you are implementing an ``IResourceProvider`` and/or ``IMetadataProvider``
+for a new distribution archive format, you may need to use the following
+``IResourceManager`` methods to co-ordinate extraction of resources to the
+filesystem.  If you're not implementing an archive format, however, you have
+no need to use these methods.  Unlike the other methods listed above, they are
+*not* available as top-level functions tied to the global ``ResourceManager``;
+you must therefore have an explicit ``ResourceManager`` instance to use them.
+
+``get_cache_path(archive_name, names=())``
+    Return absolute location in cache for `archive_name` and `names`
+
+    The parent directory of the resulting path will be created if it does
+    not already exist.  `archive_name` should be the base filename of the
+    enclosing egg (which may not be the name of the enclosing zipfile!),
+    including its ".egg" extension.  `names`, if provided, should be a
+    sequence of path name parts "under" the egg's extraction location.
+
+    This method should only be called by resource providers that need to
+    obtain an extraction location, and only for names they intend to
+    extract, as it tracks the generated names for possible cleanup later.
+
+``extraction_error()``
+    Raise an ``ExtractionError`` describing the active exception as interfering
+    with the extraction process.  You should call this if you encounter any
+    OS errors extracting the file to the cache path; it will format the
+    operating system exception for you, and add other information to the
+    ``ExtractionError`` instance that may be needed by programs that want to
+    wrap or handle extraction errors themselves.
+
+``postprocess(tempname, filename)``
+    Perform any platform-specific postprocessing of `tempname`.
+    Resource providers should call this method ONLY after successfully
+    extracting a compressed resource.  They must NOT call it on resources
+    that are already in the filesystem.
+
+    `tempname` is the current (temporary) name of the file, and `filename`
+    is the name it will be renamed to by the caller after this routine
+    returns.
+
+
+Metadata API
+============
+
+The metadata API is used to access metadata resources bundled in a pluggable
+distribution.  Metadata resources are virtual files or directories containing
+information about the distribution, such as might be used by an extensible
+application or framework to connect "plugins".  Like other kinds of resources,
+metadata resource names are ``/``-separated and should not contain ``..`` or
+begin with a ``/``.  You should not use ``os.path`` routines to manipulate
+resource paths.
+
+The metadata API is provided by objects implementing the ``IMetadataProvider``
+or ``IResourceProvider`` interfaces.  ``Distribution`` objects implement this
+interface, as do objects returned by the ``get_provider()`` function:
+
+``get_provider(package_or_requirement)``
+    If a package name is supplied, return an ``IResourceProvider`` for the
+    package.  If a ``Requirement`` is supplied, resolve it by returning a
+    ``Distribution`` from the current working set (searching the current
+    ``Environment`` if necessary and adding the newly found ``Distribution``
+    to the working set).  If the named package can't be imported, or the
+    ``Requirement`` can't be satisfied, an exception is raised.
+
+    NOTE: if you use a package name rather than a ``Requirement``, the object
+    you get back may not be a pluggable distribution, depending on the method
+    by which the package was installed.  In particular, "development" packages
+    and "single-version externally-managed" packages do not have any way to
+    map from a package name to the corresponding project's metadata.  Do not
+    write code that passes a package name to ``get_provider()`` and then tries
+    to retrieve project metadata from the returned object.  It may appear to
+    work when the named package is in an ``.egg`` file or directory, but
+    it will fail in other installation scenarios.  If you want project
+    metadata, you need to ask for a *project*, not a package.
+
+
+``IMetadataProvider`` Methods
+-----------------------------
+
+The methods provided by objects (such as ``Distribution`` instances) that
+implement the ``IMetadataProvider`` or ``IResourceProvider`` interfaces are:
+
+``has_metadata(name)``
+    Does the named metadata resource exist?
+
+``metadata_isdir(name)``
+    Is the named metadata resource a directory?
+
+``metadata_listdir(name)``
+    List of metadata names in the directory (like ``os.listdir()``)
+
+``get_metadata(name)``
+    Return the named metadata resource as a string.  The data is read in binary
+    mode; i.e., the exact bytes of the resource file are returned.
+
+``get_metadata_lines(name)``
+    Yield named metadata resource as list of non-blank non-comment lines.  This
+    is short for calling ``yield_lines(provider.get_metadata(name))``.  See the
+    section on `yield_lines()`_ below for more information on the syntax it
+    recognizes.
+
+``run_script(script_name, namespace)``
+    Execute the named script in the supplied namespace dictionary.  Raises
+    ``ResolutionError`` if there is no script by that name in the ``scripts``
+    metadata directory.  `namespace` should be a Python dictionary, usually
+    a module dictionary if the script is being run as a module.
+
+
+Exceptions
+==========
+
+``pkg_resources`` provides a simple exception hierarchy for problems that may
+occur when processing requests to locate and activate packages::
+
+    ResolutionError
+        DistributionNotFound
+        VersionConflict
+        UnknownExtra
+
+    ExtractionError
+
+``ResolutionError``
+    This class is used as a base class for the other three exceptions, so that
+    you can catch all of them with a single "except" clause.  It is also raised
+    directly for miscellaneous requirement-resolution problems like trying to
+    run a script that doesn't exist in the distribution it was requested from.
+
+``DistributionNotFound``
+    A distribution needed to fulfill a requirement could not be found.
+
+``VersionConflict``
+    The requested version of a project conflicts with an already-activated
+    version of the same project.
+
+``UnknownExtra``
+    One of the "extras" requested was not recognized by the distribution it
+    was requested from.
+
+``ExtractionError``
+    A problem occurred extracting a resource to the Python Egg cache.  The
+    following attributes are available on instances of this exception:
+
+    manager
+        The resource manager that raised this exception
+
+    cache_path
+        The base directory for resource extraction
+
+    original_error
+        The exception instance that caused extraction to fail
+
+
+Supporting Custom Importers
+===========================
+
+By default, ``pkg_resources`` supports normal filesystem imports, and
+``zipimport`` importers.  If you wish to use the ``pkg_resources`` features
+with other (PEP 302-compatible) importers or module loaders, you may need to
+register various handlers and support functions using these APIs:
+
+``register_finder(importer_type, distribution_finder)``
+    Register `distribution_finder` to find distributions in ``sys.path`` items.
+    `importer_type` is the type or class of a PEP 302 "Importer" (``sys.path``
+    item handler), and `distribution_finder` is a callable that, when passed a
+    path item, the importer instance, and an `only` flag, yields
+    ``Distribution`` instances found under that path item.  (The `only` flag,
+    if true, means the finder should yield only ``Distribution`` objects whose
+    ``location`` is equal to the path item provided.)
+
+    See the source of the ``pkg_resources.find_on_path`` function for an
+    example finder function.
+
+``register_loader_type(loader_type, provider_factory)``
+    Register `provider_factory` to make ``IResourceProvider`` objects for
+    `loader_type`.  `loader_type` is the type or class of a PEP 302
+    ``module.__loader__``, and `provider_factory` is a function that, when
+    passed a module object, returns an `IResourceProvider`_ for that module,
+    allowing it to be used with the `ResourceManager API`_.
+
+``register_namespace_handler(importer_type, namespace_handler)``
+    Register `namespace_handler` to declare namespace packages for the given
+    `importer_type`.  `importer_type` is the type or class of a PEP 302
+    "importer" (sys.path item handler), and `namespace_handler` is a callable
+    with a signature like this::
+
+        def namespace_handler(importer, path_entry, moduleName, module):
+            # return a path_entry to use for child packages
+
+    Namespace handlers are only called if the relevant importer object has
+    already agreed that it can handle the relevant path item.  The handler
+    should only return a subpath if the module ``__path__`` does not already
+    contain an equivalent subpath.  Otherwise, it should return None.
+
+    For an example namespace handler, see the source of the
+    ``pkg_resources.file_ns_handler`` function, which is used for both zipfile
+    importing and regular importing.
+
+
+IResourceProvider
+-----------------
+
+``IResourceProvider`` is an abstract class that documents what methods are
+required of objects returned by a `provider_factory` registered with
+``register_loader_type()``.  ``IResourceProvider`` is a subclass of
+``IMetadataProvider``, so objects that implement this interface must also
+implement all of the `IMetadataProvider Methods`_ as well as the methods
+shown here.  The `manager` argument to the methods below must be an object
+that supports the full `ResourceManager API`_ documented above.
+
+``get_resource_filename(manager, resource_name)``
+    Return a true filesystem path for `resource_name`, coordinating the
+    extraction with `manager`, if the resource must be unpacked to the
+    filesystem.
+
+``get_resource_stream(manager, resource_name)``
+    Return a readable file-like object for `resource_name`.
+
+``get_resource_string(manager, resource_name)``
+    Return a string containing the contents of `resource_name`.
+
+``has_resource(resource_name)``
+    Does the package contain the named resource?
+
+``resource_isdir(resource_name)``
+    Is the named resource a directory?  Return a false value if the resource
+    does not exist or is not a directory.
+
+``resource_listdir(resource_name)``
+    Return a list of the contents of the resource directory, ala
+    ``os.listdir()``.  Requesting the contents of a non-existent directory may
+    raise an exception.
+
+Note, by the way, that your provider classes need not (and should not) subclass
+``IResourceProvider`` or ``IMetadataProvider``!  These classes exist solely
+for documentation purposes and do not provide any useful implementation code.
+You may instead wish to subclass one of the `built-in resource providers`_.
+
+
+Built-in Resource Providers
+---------------------------
+
+``pkg_resources`` includes several provider classes that are automatically used
+where appropriate.  Their inheritance tree looks like this::
+
+    NullProvider
+        EggProvider
+            DefaultProvider
+                PathMetadata
+            ZipProvider
+                EggMetadata
+        EmptyProvider
+            FileMetadata
+
+
+``NullProvider``
+    This provider class is just an abstract base that provides for common
+    provider behaviors (such as running scripts), given a definition for just
+    a few abstract methods.
+
+``EggProvider``
+    This provider class adds in some egg-specific features that are common
+    to zipped and unzipped eggs.
+
+``DefaultProvider``
+    This provider class is used for unpacked eggs and "plain old Python"
+    filesystem modules.
+
+``ZipProvider``
+    This provider class is used for all zipped modules, whether they are eggs
+    or not.
+
+``EmptyProvider``
+    This provider class always returns answers consistent with a provider that
+    has no metadata or resources.  ``Distribution`` objects created without
+    a ``metadata`` argument use an instance of this provider class instead.
+    Since all ``EmptyProvider`` instances are equivalent, there is no need
+    to have more than one instance.  ``pkg_resources`` therefore creates a
+    global instance of this class under the name ``empty_provider``, and you
+    may use it if you have need of an ``EmptyProvider`` instance.
+
+``PathMetadata(path, egg_info)``
+    Create an ``IResourceProvider`` for a filesystem-based distribution, where
+    `path` is the filesystem location of the importable modules, and `egg_info`
+    is the filesystem location of the distribution's metadata directory.
+    `egg_info` should usually be the ``EGG-INFO`` subdirectory of `path` for an
+    "unpacked egg", and a ``ProjectName.egg-info`` subdirectory of `path` for
+    a "development egg".  However, other uses are possible for custom purposes.
+
+``EggMetadata(zipimporter)``
+    Create an ``IResourceProvider`` for a zipfile-based distribution.  The
+    `zipimporter` should be a ``zipimport.zipimporter`` instance, and may
+    represent a "basket" (a zipfile containing multiple ".egg" subdirectories),
+    a specific egg *within* a basket, or a zipfile egg (where the zipfile
+    itself is a ".egg").  It can also be a combination, such as a zipfile egg
+    that also contains other eggs.
+
+``FileMetadata(path_to_pkg_info)``
+    Create an ``IResourceProvider`` that provides exactly one metadata
+    resource: ``PKG-INFO``.  The supplied path should be a distutils PKG-INFO
+    file.  This is basically the same as an ``EmptyProvider``, except that
+    requests for ``PKG-INFO`` will be answered using the contents of the
+    designated file.  (This provider is used to wrap ``.egg-info`` files
+    installed by vendor-supplied system packages.)
+
+
+Utility Functions
+=================
+
+In addition to its high-level APIs, ``pkg_resources`` also includes several
+generally-useful utility routines.  These routines are used to implement the
+high-level APIs, but can also be quite useful by themselves.
+
+
+Parsing Utilities
+-----------------
+
+``parse_version(version)``
+    Parse a project's version string as defined by PEP 440. The returned
+    value will be an object that represents the version. These objects may
+    be compared to each other and sorted. The sorting algorithm is as defined
+    by PEP 440 with the addition that any version which is not a valid PEP 440
+    version will be considered less than any valid PEP 440 version and the
+    invalid versions will continue sorting using the original algorithm.
+
+.. _yield_lines():
+
+``yield_lines(strs)``
+    Yield non-empty/non-comment lines from a string/unicode or a possibly-
+    nested sequence thereof.  If `strs` is an instance of ``basestring``, it
+    is split into lines, and each non-blank, non-comment line is yielded after
+    stripping leading and trailing whitespace.  (Lines whose first non-blank
+    character is ``#`` are considered comment lines.)
+
+    If `strs` is not an instance of ``basestring``, it is iterated over, and
+    each item is passed recursively to ``yield_lines()``, so that an arbitrarily
+    nested sequence of strings, or sequences of sequences of strings can be
+    flattened out to the lines contained therein.  So for example, passing
+    a file object or a list of strings to ``yield_lines`` will both work.
+    (Note that between each string in a sequence of strings there is assumed to
+    be an implicit line break, so lines cannot bridge two strings in a
+    sequence.)
+
+    This routine is used extensively by ``pkg_resources`` to parse metadata
+    and file formats of various kinds, and most other ``pkg_resources``
+    parsing functions that yield multiple values will use it to break up their
+    input.  However, this routine is idempotent, so calling ``yield_lines()``
+    on the output of another call to ``yield_lines()`` is completely harmless.
+
+``split_sections(strs)``
+    Split a string (or possibly-nested iterable thereof), yielding ``(section,
+    content)`` pairs found using an ``.ini``-like syntax.  Each ``section`` is
+    a whitespace-stripped version of the section name ("``[section]``")
+    and each ``content`` is a list of stripped lines excluding blank lines and
+    comment-only lines.  If there are any non-blank, non-comment lines before
+    the first section header, they're yielded in a first ``section`` of
+    ``None``.
+
+    This routine uses ``yield_lines()`` as its front end, so you can pass in
+    anything that ``yield_lines()`` accepts, such as an open text file, string,
+    or sequence of strings.  ``ValueError`` is raised if a malformed section
+    header is found (i.e. a line starting with ``[`` but not ending with
+    ``]``).
+
+    Note that this simplistic parser assumes that any line whose first nonblank
+    character is ``[`` is a section heading, so it can't support .ini format
+    variations that allow ``[`` as the first nonblank character on other lines.
+
+``safe_name(name)``
+    Return a "safe" form of a project's name, suitable for use in a
+    ``Requirement`` string, as a distribution name, or a PyPI project name.
+    All non-alphanumeric runs are condensed to single "-" characters, such that
+    a name like "The $$$ Tree" becomes "The-Tree".  Note that if you are
+    generating a filename from this value you should combine it with a call to
+    ``to_filename()`` so all dashes ("-") are replaced by underscores ("_").
+    See ``to_filename()``.
+
+``safe_version(version)``
+    This will return the normalized form of any PEP 440 version. If the version
+    string is not PEP 440 compatible, then it is similar to ``safe_name()``
+    except that spaces in the input become dots, and dots are allowed to exist
+    in the output.  As with ``safe_name()``, if you are generating a filename
+    from this you should replace any "-" characters in the output with
+    underscores.
+
+``safe_extra(extra)``
+    Return a "safe" form of an extra's name, suitable for use in a requirement
+    string or a setup script's ``extras_require`` keyword.  This routine is
+    similar to ``safe_name()`` except that non-alphanumeric runs are replaced
+    by a single underbar (``_``), and the result is lowercased.
+
+``to_filename(name_or_version)``
+    Escape a name or version string so it can be used in a dash-separated
+    filename (or ``#egg=name-version`` tag) without ambiguity.  You
+    should only pass in values that were returned by ``safe_name()`` or
+    ``safe_version()``.
+
+
+Platform Utilities
+------------------
+
+``get_build_platform()``
+    Return this platform's identifier string.  For Windows, the return value
+    is ``"win32"``, and for Mac OS X it is a string of the form
+    ``"macosx-10.4-ppc"``.  All other platforms return the same uname-based
+    string that the ``distutils.util.get_platform()`` function returns.
+    This string is the minimum platform version required by distributions built
+    on the local machine.  (Backward compatibility note: setuptools versions
+    prior to 0.6b1 called this function ``get_platform()``, and the function is
+    still available under that name for backward compatibility reasons.)
+
+``get_supported_platform()`` (New in 0.6b1)
+    This is similar to ``get_build_platform()``, but is the maximum
+    platform version that the local machine supports.  You will usually want
+    to use this value as the ``provided`` argument to the
+    ``compatible_platforms()`` function.
+
+``compatible_platforms(provided, required)``
+    Return true if a distribution built on the `provided` platform may be used
+    on the `required` platform.  If either platform value is ``None``, it is
+    considered a wildcard, and the platforms are therefore compatible.
+    Likewise, if the platform strings are equal, they're also considered
+    compatible, and ``True`` is returned.  Currently, the only non-equal
+    platform strings that are considered compatible are Mac OS X platform
+    strings with the same hardware type (e.g. ``ppc``) and major version
+    (e.g. ``10``) with the `provided` platform's minor version being less than
+    or equal to the `required` platform's minor version.
+
+``get_default_cache()``
+    Determine the default cache location for extracting resources from zipped
+    eggs.  This routine returns the ``PYTHON_EGG_CACHE`` environment variable,
+    if set.  Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of
+    the user's "Application Data" directory.  On all other systems, it returns
+    ``os.path.expanduser("~/.python-eggs")`` if ``PYTHON_EGG_CACHE`` is not
+    set.
+
+
+PEP 302 Utilities
+-----------------
+
+``get_importer(path_item)``
+    A deprecated alias for ``pkgutil.get_importer()``
+
+
+File/Path Utilities
+-------------------
+
+``ensure_directory(path)``
+    Ensure that the parent directory (``os.path.dirname``) of `path` actually
+    exists, using ``os.makedirs()`` if necessary.
+
+``normalize_path(path)``
+    Return a "normalized" version of `path`, such that two paths represent
+    the same filesystem location if they have equal ``normalized_path()``
+    values.  Specifically, this is a shortcut for calling ``os.path.realpath``
+    and ``os.path.normcase`` on `path`.  Unfortunately, on certain platforms
+    (notably Cygwin and Mac OS X) the ``normcase`` function does not accurately
+    reflect the platform's case-sensitivity, so there is always the possibility
+    of two apparently-different paths being equal on such platforms.
+
+History
+-------
+
+0.6c9
+ * Fix ``resource_listdir('')`` always returning an empty list for zipped eggs.
+
+0.6c7
+ * Fix package precedence problem where single-version eggs installed in
+   ``site-packages`` would take precedence over ``.egg`` files (or directories)
+   installed in ``site-packages``.
+
+0.6c6
+ * Fix extracted C extensions not having executable permissions under Cygwin.
+
+ * Allow ``.egg-link`` files to contain relative paths.
+
+ * Fix cache dir defaults on Windows when multiple environment vars are needed
+   to construct a path.
+
+0.6c4
+ * Fix "dev" versions being considered newer than release candidates.
+
+0.6c3
+ * Python 2.5 compatibility fixes.
+
+0.6c2
+ * Fix a problem with eggs specified directly on ``PYTHONPATH`` on
+   case-insensitive filesystems possibly not showing up in the default
+   working set, due to differing normalizations of ``sys.path`` entries.
+
+0.6b3
+ * Fixed a duplicate path insertion problem on case-insensitive filesystems.
+
+0.6b1
+ * Split ``get_platform()`` into ``get_supported_platform()`` and
+   ``get_build_platform()`` to work around a Mac versioning problem that caused
+   the behavior of ``compatible_platforms()`` to be platform specific.
+
+ * Fix entry point parsing when a standalone module name has whitespace
+   between it and the extras.
+
+0.6a11
+ * Added ``ExtractionError`` and ``ResourceManager.extraction_error()`` so that
+   cache permission problems get a more user-friendly explanation of the
+   problem, and so that programs can catch and handle extraction errors if they
+   need to.
+
+0.6a10
+ * Added the ``extras`` attribute to ``Distribution``, the ``find_plugins()``
+   method to ``WorkingSet``, and the ``__add__()`` and ``__iadd__()`` methods
+   to ``Environment``.
+
+ * ``safe_name()`` now allows dots in project names.
+
+ * There is a new ``to_filename()`` function that escapes project names and
+   versions for safe use in constructing egg filenames from a Distribution
+   object's metadata.
+
+ * Added ``Distribution.clone()`` method, and keyword argument support to other
+   ``Distribution`` constructors.
+
+ * Added the ``DEVELOP_DIST`` precedence, and automatically assign it to
+   eggs using ``.egg-info`` format.
+
+0.6a9
+ * Don't raise an error when an invalid (unfinished) distribution is found
+   unless absolutely necessary.  Warn about skipping invalid/unfinished eggs
+   when building an Environment.
+
+ * Added support for ``.egg-info`` files or directories with version/platform
+   information embedded in the filename, so that system packagers have the
+   option of including ``PKG-INFO`` files to indicate the presence of a
+   system-installed egg, without needing to use ``.egg`` directories, zipfiles,
+   or ``.pth`` manipulation.
+
+ * Changed ``parse_version()`` to remove dashes before pre-release tags, so
+   that ``0.2-rc1`` is considered an *older* version than ``0.2``, and is equal
+   to ``0.2rc1``.  The idea that a dash *always* meant a post-release version
+   was highly non-intuitive to setuptools users and Python developers, who
+   seem to want to use ``-rc`` version numbers a lot.
+
+0.6a8
+ * Fixed a problem with ``WorkingSet.resolve()`` that prevented version
+   conflicts from being detected at runtime.
+
+ * Improved runtime conflict warning message to identify a line in the user's
+   program, rather than flagging the ``warn()`` call in ``pkg_resources``.
+
+ * Avoid giving runtime conflict warnings for namespace packages, even if they
+   were declared by a different package than the one currently being activated.
+
+ * Fix path insertion algorithm for case-insensitive filesystems.
+
+ * Fixed a problem with nested namespace packages (e.g. ``peak.util``) not
+   being set as an attribute of their parent package.
+
+0.6a6
+ * Activated distributions are now inserted in ``sys.path`` (and the working
+   set) just before the directory that contains them, instead of at the end.
+   This allows e.g. eggs in ``site-packages`` to override unmanaged modules in
+   the same location, and allows eggs found earlier on ``sys.path`` to override
+   ones found later.
+
+ * When a distribution is activated, it now checks whether any contained
+   non-namespace modules have already been imported and issues a warning if
+   a conflicting module has already been imported.
+
+ * Changed dependency processing so that it's breadth-first, allowing a
+   depender's preferences to override those of a dependee, to prevent conflicts
+   when a lower version is acceptable to the dependee, but not the depender.
+
+ * Fixed a problem extracting zipped files on Windows, when the egg in question
+   has had changed contents but still has the same version number.
+
+0.6a4
+ * Fix a bug in ``WorkingSet.resolve()`` that was introduced in 0.6a3.
+
+0.6a3
+ * Added ``safe_extra()`` parsing utility routine, and use it for Requirement,
+   EntryPoint, and Distribution objects' extras handling.
+
+0.6a1
+ * Enhanced performance of ``require()`` and related operations when all
+   requirements are already in the working set, and enhanced performance of
+   directory scanning for distributions.
+
+ * Fixed some problems using ``pkg_resources`` w/PEP 302 loaders other than
+   ``zipimport``, and the previously-broken "eager resource" support.
+
+ * Fixed ``pkg_resources.resource_exists()`` not working correctly, along with
+   some other resource API bugs.
+
+ * Many API changes and enhancements:
+
+   * Added ``EntryPoint``, ``get_entry_map``, ``load_entry_point``, and
+     ``get_entry_info`` APIs for dynamic plugin discovery.
+
+   * ``list_resources`` is now ``resource_listdir`` (and it actually works)
+
+   * Resource API functions like ``resource_string()`` that accepted a package
+     name and resource name, will now also accept a ``Requirement`` object in
+     place of the package name (to allow access to non-package data files in
+     an egg).
+
+   * ``get_provider()`` will now accept a ``Requirement`` instance or a module
+     name.  If it is given a ``Requirement``, it will return a corresponding
+     ``Distribution`` (by calling ``require()`` if a suitable distribution
+     isn't already in the working set), rather than returning a metadata and
+     resource provider for a specific module.  (The difference is in how
+     resource paths are interpreted; supplying a module name means resources
+     path will be module-relative, rather than relative to the distribution's
+     root.)
+
+   * ``Distribution`` objects now implement the ``IResourceProvider`` and
+     ``IMetadataProvider`` interfaces, so you don't need to reference the (no
+     longer available) ``metadata`` attribute to get at these interfaces.
+
+   * ``Distribution`` and ``Requirement`` both have a ``project_name``
+     attribute for the project name they refer to.  (Previously these were
+     ``name`` and ``distname`` attributes.)
+
+   * The ``path`` attribute of ``Distribution`` objects is now ``location``,
+     because it isn't necessarily a filesystem path (and hasn't been for some
+     time now).  The ``location`` of ``Distribution`` objects in the filesystem
+     should always be normalized using ``pkg_resources.normalize_path()``; all
+     of the setuptools and EasyInstall code that generates distributions from
+     the filesystem (including ``Distribution.from_filename()``) ensure this
+     invariant, but if you use a more generic API like ``Distribution()`` or
+     ``Distribution.from_location()`` you should take care that you don't
+     create a distribution with an un-normalized filesystem path.
+
+   * ``Distribution`` objects now have an ``as_requirement()`` method that
+     returns a ``Requirement`` for the distribution's project name and version.
+
+   * Distribution objects no longer have an ``installed_on()`` method, and the
+     ``install_on()`` method is now ``activate()`` (but may go away altogether
+     soon).  The ``depends()`` method has also been renamed to ``requires()``,
+     and ``InvalidOption`` is now ``UnknownExtra``.
+
+   * ``find_distributions()`` now takes an additional argument called ``only``,
+     that tells it to only yield distributions whose location is the passed-in
+     path.  (It defaults to False, so that the default behavior is unchanged.)
+
+   * ``AvailableDistributions`` is now called ``Environment``, and the
+     ``get()``, ``__len__()``, and ``__contains__()`` methods were removed,
+     because they weren't particularly useful.  ``__getitem__()`` no longer
+     raises ``KeyError``; it just returns an empty list if there are no
+     distributions for the named project.
+
+   * The ``resolve()`` method of ``Environment`` is now a method of
+     ``WorkingSet`` instead, and the ``best_match()`` method now uses a working
+     set instead of a path list as its second argument.
+
+   * There is a new ``pkg_resources.add_activation_listener()`` API that lets
+     you register a callback for notifications about distributions added to
+     ``sys.path`` (including the distributions already on it).  This is
+     basically a hook for extensible applications and frameworks to be able to
+     search for plugin metadata in distributions added at runtime.
+
+0.5a13
+ * Fixed a bug in resource extraction from nested packages in a zipped egg.
+
+0.5a12
+ * Updated extraction/cache mechanism for zipped resources to avoid inter-
+   process and inter-thread races during extraction.  The default cache
+   location can now be set via the ``PYTHON_EGG_CACHE`` environment variable,
+   and the default Windows cache is now a ``Python-Eggs`` subdirectory of the
+   current user's "Application Data" directory, if the ``PYTHON_EGG_CACHE``
+   variable isn't set.
+
+0.5a10
+ * Fix a problem with ``pkg_resources`` being confused by non-existent eggs on
+   ``sys.path`` (e.g. if a user deletes an egg without removing it from the
+   ``easy-install.pth`` file).
+
+ * Fix a problem with "basket" support in ``pkg_resources``, where egg-finding
+   never actually went inside ``.egg`` files.
+
+ * Made ``pkg_resources`` import the module you request resources from, if it's
+   not already imported.
+
+0.5a4
+ * ``pkg_resources.AvailableDistributions.resolve()`` and related methods now
+   accept an ``installer`` argument: a callable taking one argument, a
+   ``Requirement`` instance.  The callable must return a ``Distribution``
+   object, or ``None`` if no distribution is found.  This feature is used by
+   EasyInstall to resolve dependencies by recursively invoking itself.
+
+0.4a4
+ * Fix problems with ``resource_listdir()``, ``resource_isdir()`` and resource
+   directory extraction for zipped eggs.
+
+0.4a3
+ * Fixed scripts not being able to see a ``__file__`` variable in ``__main__``
+
+ * Fixed a problem with ``resource_isdir()`` implementation that was introduced
+   in 0.4a2.
+
+0.4a1
+ * Fixed a bug in requirements processing for exact versions (i.e. ``==`` and
+   ``!=``) when only one condition was included.
+
+ * Added ``safe_name()`` and ``safe_version()`` APIs to clean up handling of
+   arbitrary distribution names and versions found on PyPI.
+
+0.3a4
+ * ``pkg_resources`` now supports resource directories, not just the resources
+   in them.  In particular, there are ``resource_listdir()`` and
+   ``resource_isdir()`` APIs.
+
+ * ``pkg_resources`` now supports "egg baskets" -- .egg zipfiles which contain
+   multiple distributions in subdirectories whose names end with ``.egg``.
+   Having such a "basket" in a directory on ``sys.path`` is equivalent to
+   having the individual eggs in that directory, but the contained eggs can
+   be individually added (or not) to ``sys.path``.  Currently, however, there
+   is no automated way to create baskets.
+
+ * Namespace package manipulation is now protected by the Python import lock.
+
+0.3a1
+ * Initial release.
+
diff --git a/docs/python3.txt b/docs/python3.txt
new file mode 100644
index 0000000..c528fc3
--- /dev/null
+++ b/docs/python3.txt
@@ -0,0 +1,94 @@
+=====================================================
+Supporting both Python 2 and Python 3 with Setuptools
+=====================================================
+
+Starting with Distribute version 0.6.2 and Setuptools 0.7, the Setuptools
+project supported Python 3. Installing and
+using setuptools for Python 3 code works exactly the same as for Python 2
+code.
+
+Setuptools provides a facility to invoke 2to3 on the code as a part of the
+build process, by setting the keyword parameter ``use_2to3`` to True, but
+the Setuptools project strongly recommends instead developing a unified codebase
+using `six <https://pypi.org/project/six/>`_,
+`future <https://pypi.org/project/future/>`_, or another compatibility
+library.
+
+
+Using 2to3
+==========
+
+Setuptools attempts to make the porting process easier by automatically
+running
+2to3 as a part of running tests. To do so, you need to configure the
+setup.py so that you can run the unit tests with ``python setup.py test``.
+
+See :ref:`test` for more information on this.
+
+Once you have the tests running under Python 2, you can add the use_2to3
+keyword parameters to setup(), and start running the tests under Python 3.
+The test command will now first run the build command during which the code
+will be converted with 2to3, and the tests will then be run from the build
+directory, as opposed to the source directory as is normally done.
+
+Setuptools will convert all Python files, and also all doctests in Python
+files. However, if you have doctests located in separate text files, these
+will not automatically be converted. By adding them to the
+``convert_2to3_doctests`` keyword parameter Setuptools will convert them as
+well.
+
+By default, the conversion uses all fixers in the ``lib2to3.fixers`` package.
+To use additional fixers, the parameter ``use_2to3_fixers`` can be set
+to a list of names of packages containing fixers. To exclude fixers, the
+parameter ``use_2to3_exclude_fixers`` can be set to fixer names to be
+skipped.
+
+An example setup.py might look something like this::
+
+    from setuptools import setup
+
+    setup(
+        name='your.module',
+        version='1.0',
+        description='This is your awesome module',
+        author='You',
+        author_email='your@email',
+        package_dir={'': 'src'},
+        packages=['your', 'your.module'],
+        test_suite='your.module.tests',
+        use_2to3=True,
+        convert_2to3_doctests=['src/your/module/README.txt'],
+        use_2to3_fixers=['your.fixers'],
+        use_2to3_exclude_fixers=['lib2to3.fixes.fix_import'],
+    )
+
+Differential conversion
+-----------------------
+
+Note that a file will only be copied and converted during the build process
+if the source file has been changed. If you add a file to the doctests
+that should be converted, it will not be converted the next time you run
+the tests, since it hasn't been modified. You need to remove it from the
+build directory. Also if you run the build, install or test commands before
+adding the use_2to3 parameter, you will have to remove the build directory
+before you run the test command, as the files otherwise will seem updated,
+and no conversion will happen.
+
+In general, if code doesn't seem to be converted, deleting the build directory
+and trying again is a good safeguard against the build directory getting
+"out of sync" with the source directory.
+
+Distributing Python 3 modules
+=============================
+
+You can distribute your modules with Python 3 support in different ways. A
+normal source distribution will work, but can be slow in installing, as the
+2to3 process will be run during the install. But you can also distribute
+the module in binary format, such as a binary egg. That egg will contain the
+already converted code, and hence no 2to3 conversion is needed during install.
+
+Advanced features
+=================
+
+If you don't want to run the 2to3 conversion on the doctests in Python files,
+you can turn that off by setting ``setuptools.use_2to3_on_doctests = False``.
diff --git a/docs/releases.txt b/docs/releases.txt
new file mode 100644
index 0000000..30ea084
--- /dev/null
+++ b/docs/releases.txt
@@ -0,0 +1,48 @@
+===============
+Release Process
+===============
+
+In order to allow for rapid, predictable releases, Setuptools uses a
+mechanical technique for releases, enacted by Travis following a
+successful build of a tagged release per
+`PyPI deployment <https://docs.travis-ci.com/user/deployment/pypi>`_.
+
+Prior to cutting a release, please check that the CHANGES.rst reflects
+the summary of changes since the last release.
+Ideally, these changelog entries would have been added
+along with the changes, but it's always good to check.
+Think about it from the
+perspective of a user not involved with the development--what would
+that person want to know about what has changed--or from the
+perspective of your future self wanting to know when a particular
+change landed.
+
+To cut a release, install and run ``bump2version {part}`` where ``part``
+is major, minor, or patch based on the scope of the changes in the
+release. Then, push the commits to the master branch. If tests pass,
+the release will be uploaded to PyPI (from the Python 3.6 tests).
+
+Release Frequency
+-----------------
+
+Some have asked why Setuptools is released so frequently. Because Setuptools
+uses a mechanical release process, it's very easy to make releases whenever the
+code is stable (tests are passing). As a result, the philosophy is to release
+early and often.
+
+While some find the frequent releases somewhat surprising, they only empower
+the user. Although releases are made frequently, users can choose the frequency
+at which they use those releases. If instead Setuptools contributions were only
+released in batches, the user would be constrained to only use Setuptools when
+those official releases were made. With frequent releases, the user can govern
+exactly how often they wish to update.
+
+Frequent releases also then obviate the need for dev or beta releases in most
+cases. Because releases are made early and often, bugs are discovered and
+corrected quickly, in many cases before other users have yet to encounter them.
+
+Release Managers
+----------------
+
+Additionally, anyone with push access to the master branch has access to cut
+releases.
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000..2138c88
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,5 @@
+sphinx
+rst.linker>=1.9
+jaraco.packaging>=3.2
+
+setuptools>=34
diff --git a/docs/roadmap.txt b/docs/roadmap.txt
new file mode 100644
index 0000000..8f175b9
--- /dev/null
+++ b/docs/roadmap.txt
@@ -0,0 +1,6 @@
+=======
+Roadmap
+=======
+
+Setuptools is primarily in maintenance mode. The project attempts to address
+user issues, concerns, and feature requests in a timely fashion.
diff --git a/docs/setuptools.txt b/docs/setuptools.txt
new file mode 100644
index 0000000..e14d208
--- /dev/null
+++ b/docs/setuptools.txt
@@ -0,0 +1,2775 @@
+==================================================
+Building and Distributing Packages with Setuptools
+==================================================
+
+``Setuptools`` is a collection of enhancements to the Python ``distutils``
+that allow developers to more easily build and
+distribute Python packages, especially ones that have dependencies on other
+packages.
+
+Packages built and distributed using ``setuptools`` look to the user like
+ordinary Python packages based on the ``distutils``.  Your users don't need to
+install or even know about setuptools in order to use them, and you don't
+have to include the entire setuptools package in your distributions.  By
+including just a single `bootstrap module`_ (a 12K .py file), your package will
+automatically download and install ``setuptools`` if the user is building your
+package from source and doesn't have a suitable version already installed.
+
+.. _bootstrap module: https://bootstrap.pypa.io/ez_setup.py
+
+Feature Highlights:
+
+* Automatically find/download/install/upgrade dependencies at build time using
+  the `EasyInstall tool <easy_install.html>`_,
+  which supports downloading via HTTP, FTP, Subversion, and SourceForge, and
+  automatically scans web pages linked from PyPI to find download links.  (It's
+  the closest thing to CPAN currently available for Python.)
+
+* Create `Python Eggs <http://peak.telecommunity.com/DevCenter/PythonEggs>`_ -
+  a single-file importable distribution format
+
+* Enhanced support for accessing data files hosted in zipped packages.
+
+* Automatically include all packages in your source tree, without listing them
+  individually in setup.py
+
+* Automatically include all relevant files in your source distributions,
+  without needing to create a ``MANIFEST.in`` file, and without having to force
+  regeneration of the ``MANIFEST`` file when your source tree changes.
+
+* Automatically generate wrapper scripts or Windows (console and GUI) .exe
+  files for any number of "main" functions in your project.  (Note: this is not
+  a py2exe replacement; the .exe files rely on the local Python installation.)
+
+* Transparent Pyrex support, so that your setup.py can list ``.pyx`` files and
+  still work even when the end-user doesn't have Pyrex installed (as long as
+  you include the Pyrex-generated C in your source distribution)
+
+* Command aliases - create project-specific, per-user, or site-wide shortcut
+  names for commonly used commands and options
+
+* PyPI upload support - upload your source distributions and eggs to PyPI
+
+* Deploy your project in "development mode", such that it's available on
+  ``sys.path``, yet can still be edited directly from its source checkout.
+
+* Easily extend the distutils with new commands or ``setup()`` arguments, and
+  distribute/reuse your extensions for multiple projects, without copying code.
+
+* Create extensible applications and frameworks that automatically discover
+  extensions, using simple "entry points" declared in a project's setup script.
+
+.. contents:: **Table of Contents**
+
+.. _ez_setup.py: `bootstrap module`_
+
+
+-----------------
+Developer's Guide
+-----------------
+
+
+Installing ``setuptools``
+=========================
+
+Please follow the `EasyInstall Installation Instructions`_ to install the
+current stable version of setuptools.  In particular, be sure to read the
+section on `Custom Installation Locations`_ if you are installing anywhere
+other than Python's ``site-packages`` directory.
+
+.. _EasyInstall Installation Instructions: easy_install.html#installation-instructions
+
+.. _Custom Installation Locations: easy_install.html#custom-installation-locations
+
+If you want the current in-development version of setuptools, you should first
+install a stable version, and then run::
+
+    ez_setup.py setuptools==dev
+
+This will download and install the latest development (i.e. unstable) version
+of setuptools from the Python Subversion sandbox.
+
+
+Basic Use
+=========
+
+For basic use of setuptools, just import things from setuptools instead of
+the distutils.  Here's a minimal setup script using setuptools::
+
+    from setuptools import setup, find_packages
+    setup(
+        name="HelloWorld",
+        version="0.1",
+        packages=find_packages(),
+    )
+
+As you can see, it doesn't take much to use setuptools in a project.
+Run that script in your project folder, alongside the Python packages
+you have developed.
+
+Invoke that script to produce eggs, upload to
+PyPI, and automatically include all packages in the directory where the
+setup.py lives.  See the `Command Reference`_ section below to see what
+commands you can give to this setup script. For example,
+to produce a source distribution, simply invoke::
+
+    python setup.py sdist
+
+Of course, before you release your project to PyPI, you'll want to add a bit
+more information to your setup script to help people find or learn about your
+project.  And maybe your project will have grown by then to include a few
+dependencies, and perhaps some data files and scripts::
+
+    from setuptools import setup, find_packages
+    setup(
+        name="HelloWorld",
+        version="0.1",
+        packages=find_packages(),
+        scripts=['say_hello.py'],
+
+        # Project uses reStructuredText, so ensure that the docutils get
+        # installed or upgraded on the target machine
+        install_requires=['docutils>=0.3'],
+
+        package_data={
+            # If any package contains *.txt or *.rst files, include them:
+            '': ['*.txt', '*.rst'],
+            # And include any *.msg files found in the 'hello' package, too:
+            'hello': ['*.msg'],
+        },
+
+        # metadata for upload to PyPI
+        author="Me",
+        author_email="me@example.com",
+        description="This is an Example Package",
+        license="PSF",
+        keywords="hello world example examples",
+        url="http://example.com/HelloWorld/",   # project home page, if any
+        project_urls={
+            "Bug Tracker": "https://bugs.example.com/HelloWorld/",
+            "Documentation": "https://docs.example.com/HelloWorld/",
+            "Source Code": "https://code.example.com/HelloWorld/",
+        }
+
+        # could also include long_description, download_url, classifiers, etc.
+    )
+
+In the sections that follow, we'll explain what most of these ``setup()``
+arguments do (except for the metadata ones), and the various ways you might use
+them in your own project(s).
+
+
+Specifying Your Project's Version
+---------------------------------
+
+Setuptools can work well with most versioning schemes; there are, however, a
+few special things to watch out for, in order to ensure that setuptools and
+EasyInstall can always tell what version of your package is newer than another
+version.  Knowing these things will also help you correctly specify what
+versions of other projects your project depends on.
+
+A version consists of an alternating series of release numbers and pre-release
+or post-release tags.  A release number is a series of digits punctuated by
+dots, such as ``2.4`` or ``0.5``.  Each series of digits is treated
+numerically, so releases ``2.1`` and ``2.1.0`` are different ways to spell the
+same release number, denoting the first subrelease of release 2.  But  ``2.10``
+is the *tenth* subrelease of release 2, and so is a different and newer release
+from ``2.1`` or ``2.1.0``.  Leading zeros within a series of digits are also
+ignored, so ``2.01`` is the same as ``2.1``, and different from ``2.0.1``.
+
+Following a release number, you can have either a pre-release or post-release
+tag.  Pre-release tags make a version be considered *older* than the version
+they are appended to.  So, revision ``2.4`` is *newer* than revision ``2.4c1``,
+which in turn is newer than ``2.4b1`` or ``2.4a1``.  Postrelease tags make
+a version be considered *newer* than the version they are appended to.  So,
+revisions like ``2.4-1`` and ``2.4pl3`` are newer than ``2.4``, but are *older*
+than ``2.4.1`` (which has a higher release number).
+
+A pre-release tag is a series of letters that are alphabetically before
+"final".  Some examples of prerelease tags would include ``alpha``, ``beta``,
+``a``, ``c``, ``dev``, and so on.  You do not have to place a dot or dash
+before the prerelease tag if it's immediately after a number, but it's okay to
+do so if you prefer.  Thus, ``2.4c1`` and ``2.4.c1`` and ``2.4-c1`` all
+represent release candidate 1 of version ``2.4``, and are treated as identical
+by setuptools.
+
+In addition, there are three special prerelease tags that are treated as if
+they were the letter ``c``: ``pre``, ``preview``, and ``rc``.  So, version
+``2.4rc1``, ``2.4pre1`` and ``2.4preview1`` are all the exact same version as
+``2.4c1``, and are treated as identical by setuptools.
+
+A post-release tag is either a series of letters that are alphabetically
+greater than or equal to "final", or a dash (``-``).  Post-release tags are
+generally used to separate patch numbers, port numbers, build numbers, revision
+numbers, or date stamps from the release number.  For example, the version
+``2.4-r1263`` might denote Subversion revision 1263 of a post-release patch of
+version ``2.4``.  Or you might use ``2.4-20051127`` to denote a date-stamped
+post-release.
+
+Notice that after each pre or post-release tag, you are free to place another
+release number, followed again by more pre- or post-release tags.  For example,
+``0.6a9.dev-r41475`` could denote Subversion revision 41475 of the in-
+development version of the ninth alpha of release 0.6.  Notice that ``dev`` is
+a pre-release tag, so this version is a *lower* version number than ``0.6a9``,
+which would be the actual ninth alpha of release 0.6.  But the ``-r41475`` is
+a post-release tag, so this version is *newer* than ``0.6a9.dev``.
+
+For the most part, setuptools' interpretation of version numbers is intuitive,
+but here are a few tips that will keep you out of trouble in the corner cases:
+
+* Don't stick adjoining pre-release tags together without a dot or number
+  between them.  Version ``1.9adev`` is the ``adev`` prerelease of ``1.9``,
+  *not* a development pre-release of ``1.9a``.  Use ``.dev`` instead, as in
+  ``1.9a.dev``, or separate the prerelease tags with a number, as in
+  ``1.9a0dev``.  ``1.9a.dev``, ``1.9a0dev``, and even ``1.9.a.dev`` are
+  identical versions from setuptools' point of view, so you can use whatever
+  scheme you prefer.
+
+* If you want to be certain that your chosen numbering scheme works the way
+  you think it will, you can use the ``pkg_resources.parse_version()`` function
+  to compare different version numbers::
+
+    >>> from pkg_resources import parse_version
+    >>> parse_version('1.9.a.dev') == parse_version('1.9a0dev')
+    True
+    >>> parse_version('2.1-rc2') < parse_version('2.1')
+    True
+    >>> parse_version('0.6a9dev-r41475') < parse_version('0.6a9')
+    True
+
+Once you've decided on a version numbering scheme for your project, you can
+have setuptools automatically tag your in-development releases with various
+pre- or post-release tags.  See the following sections for more details:
+
+* `Tagging and "Daily Build" or "Snapshot" Releases`_
+* `Managing "Continuous Releases" Using Subversion`_
+* The `egg_info`_ command
+
+
+New and Changed ``setup()`` Keywords
+====================================
+
+The following keyword arguments to ``setup()`` are added or changed by
+``setuptools``.  All of them are optional; you do not have to supply them
+unless you need the associated ``setuptools`` feature.
+
+``include_package_data``
+    If set to ``True``, this tells ``setuptools`` to automatically include any
+    data files it finds inside your package directories that are specified by
+    your ``MANIFEST.in`` file.  For more information, see the section below on
+    `Including Data Files`_.
+
+``exclude_package_data``
+    A dictionary mapping package names to lists of glob patterns that should
+    be *excluded* from your package directories.  You can use this to trim back
+    any excess files included by ``include_package_data``.  For a complete
+    description and examples, see the section below on `Including Data Files`_.
+
+``package_data``
+    A dictionary mapping package names to lists of glob patterns.  For a
+    complete description and examples, see the section below on `Including
+    Data Files`_.  You do not need to use this option if you are using
+    ``include_package_data``, unless you need to add e.g. files that are
+    generated by your setup script and build process.  (And are therefore not
+    in source control or are files that you don't want to include in your
+    source distribution.)
+
+``zip_safe``
+    A boolean (True or False) flag specifying whether the project can be
+    safely installed and run from a zip file.  If this argument is not
+    supplied, the ``bdist_egg`` command will have to analyze all of your
+    project's contents for possible problems each time it builds an egg.
+
+``install_requires``
+    A string or list of strings specifying what other distributions need to
+    be installed when this one is.  See the section below on `Declaring
+    Dependencies`_ for details and examples of the format of this argument.
+
+``entry_points``
+    A dictionary mapping entry point group names to strings or lists of strings
+    defining the entry points.  Entry points are used to support dynamic
+    discovery of services or plugins provided by a project.  See `Dynamic
+    Discovery of Services and Plugins`_ for details and examples of the format
+    of this argument.  In addition, this keyword is used to support `Automatic
+    Script Creation`_.
+
+``extras_require``
+    A dictionary mapping names of "extras" (optional features of your project)
+    to strings or lists of strings specifying what other distributions must be
+    installed to support those features.  See the section below on `Declaring
+    Dependencies`_ for details and examples of the format of this argument.
+
+``python_requires``
+    A string corresponding to a version specifier (as defined in PEP 440) for
+    the Python version, used to specify the Requires-Python defined in PEP 345.
+
+``setup_requires``
+    A string or list of strings specifying what other distributions need to
+    be present in order for the *setup script* to run.  ``setuptools`` will
+    attempt to obtain these (even going so far as to download them using
+    ``EasyInstall``) before processing the rest of the setup script or commands.
+    This argument is needed if you are using distutils extensions as part of
+    your build process; for example, extensions that process setup() arguments
+    and turn them into EGG-INFO metadata files.
+
+    (Note: projects listed in ``setup_requires`` will NOT be automatically
+    installed on the system where the setup script is being run.  They are
+    simply downloaded to the ./.eggs directory if they're not locally available
+    already.  If you want them to be installed, as well as being available
+    when the setup script is run, you should add them to ``install_requires``
+    **and** ``setup_requires``.)
+
+``dependency_links``
+    A list of strings naming URLs to be searched when satisfying dependencies.
+    These links will be used if needed to install packages specified by
+    ``setup_requires`` or ``tests_require``.  They will also be written into
+    the egg's metadata for use by tools like EasyInstall to use when installing
+    an ``.egg`` file.
+
+``namespace_packages``
+    A list of strings naming the project's "namespace packages".  A namespace
+    package is a package that may be split across multiple project
+    distributions.  For example, Zope 3's ``zope`` package is a namespace
+    package, because subpackages like ``zope.interface`` and ``zope.publisher``
+    may be distributed separately.  The egg runtime system can automatically
+    merge such subpackages into a single parent package at runtime, as long
+    as you declare them in each project that contains any subpackages of the
+    namespace package, and as long as the namespace package's ``__init__.py``
+    does not contain any code other than a namespace declaration.  See the
+    section below on `Namespace Packages`_ for more information.
+
+``test_suite``
+    A string naming a ``unittest.TestCase`` subclass (or a package or module
+    containing one or more of them, or a method of such a subclass), or naming
+    a function that can be called with no arguments and returns a
+    ``unittest.TestSuite``.  If the named suite is a module, and the module
+    has an ``additional_tests()`` function, it is called and the results are
+    added to the tests to be run.  If the named suite is a package, any
+    submodules and subpackages are recursively added to the overall test suite.
+
+    Specifying this argument enables use of the `test`_ command to run the
+    specified test suite, e.g. via ``setup.py test``.  See the section on the
+    `test`_ command below for more details.
+
+``tests_require``
+    If your project's tests need one or more additional packages besides those
+    needed to install it, you can use this option to specify them.  It should
+    be a string or list of strings specifying what other distributions need to
+    be present for the package's tests to run.  When you run the ``test``
+    command, ``setuptools`` will attempt to obtain these (even going
+    so far as to download them using ``EasyInstall``).  Note that these
+    required projects will *not* be installed on the system where the tests
+    are run, but only downloaded to the project's setup directory if they're
+    not already installed locally.
+
+.. _test_loader:
+
+``test_loader``
+    If you would like to use a different way of finding tests to run than what
+    setuptools normally uses, you can specify a module name and class name in
+    this argument.  The named class must be instantiable with no arguments, and
+    its instances must support the ``loadTestsFromNames()`` method as defined
+    in the Python ``unittest`` module's ``TestLoader`` class.  Setuptools will
+    pass only one test "name" in the `names` argument: the value supplied for
+    the ``test_suite`` argument.  The loader you specify may interpret this
+    string in any way it likes, as there are no restrictions on what may be
+    contained in a ``test_suite`` string.
+
+    The module name and class name must be separated by a ``:``.  The default
+    value of this argument is ``"setuptools.command.test:ScanningLoader"``.  If
+    you want to use the default ``unittest`` behavior, you can specify
+    ``"unittest:TestLoader"`` as your ``test_loader`` argument instead.  This
+    will prevent automatic scanning of submodules and subpackages.
+
+    The module and class you specify here may be contained in another package,
+    as long as you use the ``tests_require`` option to ensure that the package
+    containing the loader class is available when the ``test`` command is run.
+
+``eager_resources``
+    A list of strings naming resources that should be extracted together, if
+    any of them is needed, or if any C extensions included in the project are
+    imported.  This argument is only useful if the project will be installed as
+    a zipfile, and there is a need to have all of the listed resources be
+    extracted to the filesystem *as a unit*.  Resources listed here
+    should be '/'-separated paths, relative to the source root, so to list a
+    resource ``foo.png`` in package ``bar.baz``, you would include the string
+    ``bar/baz/foo.png`` in this argument.
+
+    If you only need to obtain resources one at a time, or you don't have any C
+    extensions that access other files in the project (such as data files or
+    shared libraries), you probably do NOT need this argument and shouldn't
+    mess with it.  For more details on how this argument works, see the section
+    below on `Automatic Resource Extraction`_.
+
+``use_2to3``
+    Convert the source code from Python 2 to Python 3 with 2to3 during the
+    build process. See :doc:`python3` for more details.
+
+``convert_2to3_doctests``
+    List of doctest source files that need to be converted with 2to3.
+    See :doc:`python3` for more details.
+
+``use_2to3_fixers``
+    A list of modules to search for additional fixers to be used during
+    the 2to3 conversion. See :doc:`python3` for more details.
+
+``project_urls``
+    An arbitrary map of URL names to hyperlinks, allowing more extensible
+    documentation of where various resources can be found than the simple
+    ``url`` and ``download_url`` options provide.
+
+
+Using ``find_packages()``
+-------------------------
+
+For simple projects, it's usually easy enough to manually add packages to
+the ``packages`` argument of ``setup()``.  However, for very large projects
+(Twisted, PEAK, Zope, Chandler, etc.), it can be a big burden to keep the
+package list updated.  That's what ``setuptools.find_packages()`` is for.
+
+``find_packages()`` takes a source directory and two lists of package name
+patterns to exclude and include.  If omitted, the source directory defaults to
+the same
+directory as the setup script.  Some projects use a ``src`` or ``lib``
+directory as the root of their source tree, and those projects would of course
+use ``"src"`` or ``"lib"`` as the first argument to ``find_packages()``.  (And
+such projects also need something like ``package_dir={'':'src'}`` in their
+``setup()`` arguments, but that's just a normal distutils thing.)
+
+Anyway, ``find_packages()`` walks the target directory, filtering by inclusion
+patterns, and finds Python packages (any directory). Packages are only
+recognized if they include an ``__init__.py`` file. Finally, exclusion 
+patterns are applied to remove matching packages.
+
+Inclusion and exclusion patterns are package names, optionally including
+wildcards.  For
+example, ``find_packages(exclude=["*.tests"])`` will exclude all packages whose
+last name part is ``tests``.   Or, ``find_packages(exclude=["*.tests",
+"*.tests.*"])`` will also exclude any subpackages of packages named ``tests``,
+but it still won't exclude a top-level ``tests`` package or the children
+thereof.  In fact, if you really want no ``tests`` packages at all, you'll need
+something like this::
+
+    find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"])
+
+in order to cover all the bases.  Really, the exclusion patterns are intended
+to cover simpler use cases than this, like excluding a single, specified
+package and its subpackages.
+
+Regardless of the parameters, the ``find_packages()``
+function returns a list of package names suitable for use as the ``packages``
+argument to ``setup()``, and so is usually the easiest way to set that
+argument in your setup script.  Especially since it frees you from having to
+remember to modify your setup script whenever your project grows additional
+top-level packages or subpackages.
+
+
+Automatic Script Creation
+=========================
+
+Packaging and installing scripts can be a bit awkward with the distutils.  For
+one thing, there's no easy way to have a script's filename match local
+conventions on both Windows and POSIX platforms.  For another, you often have
+to create a separate file just for the "main" script, when your actual "main"
+is a function in a module somewhere.  And even in Python 2.4, using the ``-m``
+option only works for actual ``.py`` files that aren't installed in a package.
+
+``setuptools`` fixes all of these problems by automatically generating scripts
+for you with the correct extension, and on Windows it will even create an
+``.exe`` file so that users don't have to change their ``PATHEXT`` settings.
+The way to use this feature is to define "entry points" in your setup script
+that indicate what function the generated script should import and run.  For
+example, to create two console scripts called ``foo`` and ``bar``, and a GUI
+script called ``baz``, you might do something like this::
+
+    setup(
+        # other arguments here...
+        entry_points={
+            'console_scripts': [
+                'foo = my_package.some_module:main_func',
+                'bar = other_module:some_func',
+            ],
+            'gui_scripts': [
+                'baz = my_package_gui:start_func',
+            ]
+        }
+    )
+
+When this project is installed on non-Windows platforms (using "setup.py
+install", "setup.py develop", or by using EasyInstall), a set of ``foo``,
+``bar``, and ``baz`` scripts will be installed that import ``main_func`` and
+``some_func`` from the specified modules.  The functions you specify are called
+with no arguments, and their return value is passed to ``sys.exit()``, so you
+can return an errorlevel or message to print to stderr.
+
+On Windows, a set of ``foo.exe``, ``bar.exe``, and ``baz.exe`` launchers are
+created, alongside a set of ``foo.py``, ``bar.py``, and ``baz.pyw`` files.  The
+``.exe`` wrappers find and execute the right version of Python to run the
+``.py`` or ``.pyw`` file.
+
+You may define as many "console script" and "gui script" entry points as you
+like, and each one can optionally specify "extras" that it depends on, that
+will be added to ``sys.path`` when the script is run.  For more information on
+"extras", see the section below on `Declaring Extras`_.  For more information
+on "entry points" in general, see the section below on `Dynamic Discovery of
+Services and Plugins`_.
+
+
+"Eggsecutable" Scripts
+----------------------
+
+Occasionally, there are situations where it's desirable to make an ``.egg``
+file directly executable.  You can do this by including an entry point such
+as the following::
+
+    setup(
+        # other arguments here...
+        entry_points={
+            'setuptools.installation': [
+                'eggsecutable = my_package.some_module:main_func',
+            ]
+        }
+    )
+
+Any eggs built from the above setup script will include a short executable
+prelude that imports and calls ``main_func()`` from ``my_package.some_module``.
+The prelude can be run on Unix-like platforms (including Mac and Linux) by
+invoking the egg with ``/bin/sh``, or by enabling execute permissions on the
+``.egg`` file.  For the executable prelude to run, the appropriate version of
+Python must be available via the ``PATH`` environment variable, under its
+"long" name.  That is, if the egg is built for Python 2.3, there must be a
+``python2.3`` executable present in a directory on ``PATH``.
+
+This feature is primarily intended to support the installation of setuptools
+itself via ``ez_setup`` on non-Windows platforms, but may also be useful for
+other projects.
+
+IMPORTANT NOTE: Eggs with an "eggsecutable" header cannot be renamed, or
+invoked via symlinks.  They *must* be invoked using their original filename, in
+order to ensure that, once running, ``pkg_resources`` will know what project
+and version is in use.  The header script will check this and exit with an
+error if the ``.egg`` file has been renamed or is invoked via a symlink that
+changes its base name.
+
+
+Declaring Dependencies
+======================
+
+``setuptools`` supports automatically installing dependencies when a package is
+installed, and including information about dependencies in Python Eggs (so that
+package management tools like EasyInstall can use the information).
+
+``setuptools`` and ``pkg_resources`` use a common syntax for specifying a
+project's required dependencies.  This syntax consists of a project's PyPI
+name, optionally followed by a comma-separated list of "extras" in square
+brackets, optionally followed by a comma-separated list of version
+specifiers.  A version specifier is one of the operators ``<``, ``>``, ``<=``,
+``>=``, ``==`` or ``!=``, followed by a version identifier.  Tokens may be
+separated by whitespace, but any whitespace or nonstandard characters within a
+project name or version identifier must be replaced with ``-``.
+
+Version specifiers for a given project are internally sorted into ascending
+version order, and used to establish what ranges of versions are acceptable.
+Adjacent redundant conditions are also consolidated (e.g. ``">1, >2"`` becomes
+``">2"``, and ``"<2,<3"`` becomes ``"<2"``). ``"!="`` versions are excised from
+the ranges they fall within.  A project's version is then checked for
+membership in the resulting ranges. (Note that providing conflicting conditions
+for the same version (e.g. "<2,>=2" or "==2,!=2") is meaningless and may
+therefore produce bizarre results.)
+
+Here are some example requirement specifiers::
+
+    docutils >= 0.3
+
+    # comment lines and \ continuations are allowed in requirement strings
+    BazSpam ==1.1, ==1.2, ==1.3, ==1.4, ==1.5, \
+        ==1.6, ==1.7  # and so are line-end comments
+
+    PEAK[FastCGI, reST]>=0.5a4
+
+    setuptools==0.5a7
+
+The simplest way to include requirement specifiers is to use the
+``install_requires`` argument to ``setup()``.  It takes a string or list of
+strings containing requirement specifiers.  If you include more than one
+requirement in a string, each requirement must begin on a new line.
+
+This has three effects:
+
+1. When your project is installed, either by using EasyInstall, ``setup.py
+   install``, or ``setup.py develop``, all of the dependencies not already
+   installed will be located (via PyPI), downloaded, built (if necessary),
+   and installed.
+
+2. Any scripts in your project will be installed with wrappers that verify
+   the availability of the specified dependencies at runtime, and ensure that
+   the correct versions are added to ``sys.path`` (e.g. if multiple versions
+   have been installed).
+
+3. Python Egg distributions will include a metadata file listing the
+   dependencies.
+
+Note, by the way, that if you declare your dependencies in ``setup.py``, you do
+*not* need to use the ``require()`` function in your scripts or modules, as
+long as you either install the project or use ``setup.py develop`` to do
+development work on it.  (See `"Development Mode"`_ below for more details on
+using ``setup.py develop``.)
+
+
+Dependencies that aren't in PyPI
+--------------------------------
+
+If your project depends on packages that aren't registered in PyPI, you may
+still be able to depend on them, as long as they are available for download
+as:
+
+- an egg, in the standard distutils ``sdist`` format,
+- a single ``.py`` file, or
+- a VCS repository (Subversion, Mercurial, or Git).
+
+You just need to add some URLs to the ``dependency_links`` argument to
+``setup()``.
+
+The URLs must be either:
+
+1. direct download URLs,
+2. the URLs of web pages that contain direct download links, or
+3. the repository's URL
+
+In general, it's better to link to web pages, because it is usually less
+complex to update a web page than to release a new version of your project.
+You can also use a SourceForge ``showfiles.php`` link in the case where a
+package you depend on is distributed via SourceForge.
+
+If you depend on a package that's distributed as a single ``.py`` file, you
+must include an ``"#egg=project-version"`` suffix to the URL, to give a project
+name and version number.  (Be sure to escape any dashes in the name or version
+by replacing them with underscores.)  EasyInstall will recognize this suffix
+and automatically create a trivial ``setup.py`` to wrap the single ``.py`` file
+as an egg.
+
+In the case of a VCS checkout, you should also append ``#egg=project-version``
+in order to identify for what package that checkout should be used. You can
+append ``@REV`` to the URL's path (before the fragment) to specify a revision.
+Additionally, you can also force the VCS being used by prepending the URL with
+a certain prefix. Currently available are:
+
+-  ``svn+URL`` for Subversion,
+-  ``git+URL`` for Git, and
+-  ``hg+URL`` for Mercurial
+
+A more complete example would be:
+
+    ``vcs+proto://host/path@revision#egg=project-version``
+
+Be careful with the version. It should match the one inside the project files.
+If you want to disregard the version, you have to omit it both in the
+``requires`` and in the URL's fragment.
+
+This will do a checkout (or a clone, in Git and Mercurial parlance) to a
+temporary folder and run ``setup.py bdist_egg``.
+
+The ``dependency_links`` option takes the form of a list of URL strings.  For
+example, the below will cause EasyInstall to search the specified page for
+eggs or source distributions, if the package's dependencies aren't already
+installed::
+
+    setup(
+        ...
+        dependency_links=[
+            "http://peak.telecommunity.com/snapshots/"
+        ],
+    )
+
+
+.. _Declaring Extras:
+
+
+Declaring "Extras" (optional features with their own dependencies)
+------------------------------------------------------------------
+
+Sometimes a project has "recommended" dependencies, that are not required for
+all uses of the project.  For example, a project might offer optional PDF
+output if ReportLab is installed, and reStructuredText support if docutils is
+installed.  These optional features are called "extras", and setuptools allows
+you to define their requirements as well.  In this way, other projects that
+require these optional features can force the additional requirements to be
+installed, by naming the desired extras in their ``install_requires``.
+
+For example, let's say that Project A offers optional PDF and reST support::
+
+    setup(
+        name="Project-A",
+        ...
+        extras_require={
+            'PDF':  ["ReportLab>=1.2", "RXP"],
+            'reST': ["docutils>=0.3"],
+        }
+    )
+
+As you can see, the ``extras_require`` argument takes a dictionary mapping
+names of "extra" features, to strings or lists of strings describing those
+features' requirements.  These requirements will *not* be automatically
+installed unless another package depends on them (directly or indirectly) by
+including the desired "extras" in square brackets after the associated project
+name.  (Or if the extras were listed in a requirement spec on the EasyInstall
+command line.)
+
+Extras can be used by a project's `entry points`_ to specify dynamic
+dependencies.  For example, if Project A includes a "rst2pdf" script, it might
+declare it like this, so that the "PDF" requirements are only resolved if the
+"rst2pdf" script is run::
+
+    setup(
+        name="Project-A",
+        ...
+        entry_points={
+            'console_scripts': [
+                'rst2pdf = project_a.tools.pdfgen [PDF]',
+                'rst2html = project_a.tools.htmlgen',
+                # more script entry points ...
+            ],
+        }
+    )
+
+Projects can also use another project's extras when specifying dependencies.
+For example, if project B needs "project A" with PDF support installed, it
+might declare the dependency like this::
+
+    setup(
+        name="Project-B",
+        install_requires=["Project-A[PDF]"],
+        ...
+    )
+
+This will cause ReportLab to be installed along with project A, if project B is
+installed -- even if project A was already installed.  In this way, a project
+can encapsulate groups of optional "downstream dependencies" under a feature
+name, so that packages that depend on it don't have to know what the downstream
+dependencies are.  If a later version of Project A builds in PDF support and
+no longer needs ReportLab, or if it ends up needing other dependencies besides
+ReportLab in order to provide PDF support, Project B's setup information does
+not need to change, but the right packages will still be installed if needed.
+
+Note, by the way, that if a project ends up not needing any other packages to
+support a feature, it should keep an empty requirements list for that feature
+in its ``extras_require`` argument, so that packages depending on that feature
+don't break (due to an invalid feature name).  For example, if Project A above
+builds in PDF support and no longer needs ReportLab, it could change its
+setup to this::
+
+    setup(
+        name="Project-A",
+        ...
+        extras_require={
+            'PDF':  [],
+            'reST': ["docutils>=0.3"],
+        }
+    )
+
+so that Package B doesn't have to remove the ``[PDF]`` from its requirement
+specifier.
+
+
+.. _Platform Specific Dependencies:
+
+
+Declaring platform specific dependencies
+----------------------------------------
+
+Sometimes a project might require a dependency to run on a specific platform.
+This could be a package that backports a module so that it can be used in
+older Python versions.  Or it could be a package that is required to run on a
+specific operating system.  This will allow a project to work on multiple
+different platforms without installing dependencies that are not required for
+a platform that is installing the project.
+
+For example, here is a project that uses the ``enum`` module and ``pywin32``::
+
+    setup(
+        name="Project",
+        ...
+        install_requires=[
+            'enum34;python_version<"3.4"',
+            'pywin32 >= 1.0;platform_system=="Windows"'
+        ]
+    )
+
+Since the ``enum`` module was added in Python 3.4, it should only be installed
+if the Python version is earlier.  Since ``pywin32`` will only be used on
+Windows, it should only be installed when the operating system is Windows.
+Specifying version requirements for the dependencies is supported as normal.
+
+The environmental markers that may be used for testing platform types are
+detailed in `PEP 508`_.
+
+.. _PEP 508: https://www.python.org/dev/peps/pep-0508/
+
+Including Data Files
+====================
+
+The distutils have traditionally allowed installation of "data files", which
+are placed in a platform-specific location.  However, the most common use case
+for data files distributed with a package is for use *by* the package, usually
+by including the data files in the package directory.
+
+Setuptools offers three ways to specify data files to be included in your
+packages.  First, you can simply use the ``include_package_data`` keyword,
+e.g.::
+
+    from setuptools import setup, find_packages
+    setup(
+        ...
+        include_package_data=True
+    )
+
+This tells setuptools to install any data files it finds in your packages.
+The data files must be specified via the distutils' ``MANIFEST.in`` file.
+(They can also be tracked by a revision control system, using an appropriate
+plugin.  See the section below on `Adding Support for Revision Control
+Systems`_ for information on how to write such plugins.)
+
+If you want finer-grained control over what files are included (for example,
+if you have documentation files in your package directories and want to exclude
+them from installation), then you can also use the ``package_data`` keyword,
+e.g.::
+
+    from setuptools import setup, find_packages
+    setup(
+        ...
+        package_data={
+            # If any package contains *.txt or *.rst files, include them:
+            '': ['*.txt', '*.rst'],
+            # And include any *.msg files found in the 'hello' package, too:
+            'hello': ['*.msg'],
+        }
+    )
+
+The ``package_data`` argument is a dictionary that maps from package names to
+lists of glob patterns.  The globs may include subdirectory names, if the data
+files are contained in a subdirectory of the package.  For example, if the
+package tree looks like this::
+
+    setup.py
+    src/
+        mypkg/
+            __init__.py
+            mypkg.txt
+            data/
+                somefile.dat
+                otherdata.dat
+
+The setuptools setup file might look like this::
+
+    from setuptools import setup, find_packages
+    setup(
+        ...
+        packages=find_packages('src'),  # include all packages under src
+        package_dir={'':'src'},   # tell distutils packages are under src
+
+        package_data={
+            # If any package contains *.txt files, include them:
+            '': ['*.txt'],
+            # And include any *.dat files found in the 'data' subdirectory
+            # of the 'mypkg' package, also:
+            'mypkg': ['data/*.dat'],
+        }
+    )
+
+Notice that if you list patterns in ``package_data`` under the empty string,
+these patterns are used to find files in every package, even ones that also
+have their own patterns listed.  Thus, in the above example, the ``mypkg.txt``
+file gets included even though it's not listed in the patterns for ``mypkg``.
+
+Also notice that if you use paths, you *must* use a forward slash (``/``) as
+the path separator, even if you are on Windows.  Setuptools automatically
+converts slashes to appropriate platform-specific separators at build time.
+
+If datafiles are contained in a subdirectory of a package that isn't a package
+itself (no ``__init__.py``), then the subdirectory names (or ``*``) are required
+in the ``package_data`` argument (as shown above with ``'data/*.dat'``).
+
+When building an ``sdist``, the datafiles are also drawn from the
+``package_name.egg-info/SOURCES.txt`` file, so make sure that this is removed if
+the ``setup.py`` ``package_data`` list is updated before calling ``setup.py``.
+
+(Note: although the ``package_data`` argument was previously only available in
+``setuptools``, it was also added to the Python ``distutils`` package as of
+Python 2.4; there is `some documentation for the feature`__ available on the
+python.org website.  If using the setuptools-specific ``include_package_data``
+argument, files specified by ``package_data`` will *not* be automatically
+added to the manifest unless they are listed in the MANIFEST.in file.)
+
+__ http://docs.python.org/dist/node11.html
+
+Sometimes, the ``include_package_data`` or ``package_data`` options alone
+aren't sufficient to precisely define what files you want included.  For
+example, you may want to include package README files in your revision control
+system and source distributions, but exclude them from being installed.  So,
+setuptools offers an ``exclude_package_data`` option as well, that allows you
+to do things like this::
+
+    from setuptools import setup, find_packages
+    setup(
+        ...
+        packages=find_packages('src'),  # include all packages under src
+        package_dir={'':'src'},   # tell distutils packages are under src
+
+        include_package_data=True,    # include everything in source control
+
+        # ...but exclude README.txt from all packages
+        exclude_package_data={'': ['README.txt']},
+    )
+
+The ``exclude_package_data`` option is a dictionary mapping package names to
+lists of wildcard patterns, just like the ``package_data`` option.  And, just
+as with that option, a key of ``''`` will apply the given pattern(s) to all
+packages.  However, any files that match these patterns will be *excluded*
+from installation, even if they were listed in ``package_data`` or were
+included as a result of using ``include_package_data``.
+
+In summary, the three options allow you to:
+
+``include_package_data``
+    Accept all data files and directories matched by ``MANIFEST.in``.
+
+``package_data``
+    Specify additional patterns to match files that may or may
+    not be matched by ``MANIFEST.in`` or found in source control.
+
+``exclude_package_data``
+    Specify patterns for data files and directories that should *not* be
+    included when a package is installed, even if they would otherwise have
+    been included due to the use of the preceding options.
+
+NOTE: Due to the way the distutils build process works, a data file that you
+include in your project and then stop including may be "orphaned" in your
+project's build directories, requiring you to run ``setup.py clean --all`` to
+fully remove them.  This may also be important for your users and contributors
+if they track intermediate revisions of your project using Subversion; be sure
+to let them know when you make changes that remove files from inclusion so they
+can run ``setup.py clean --all``.
+
+
+Accessing Data Files at Runtime
+-------------------------------
+
+Typically, existing programs manipulate a package's ``__file__`` attribute in
+order to find the location of data files.  However, this manipulation isn't
+compatible with PEP 302-based import hooks, including importing from zip files
+and Python Eggs.  It is strongly recommended that, if you are using data files,
+you should use the :ref:`ResourceManager API` of ``pkg_resources`` to access
+them.  The ``pkg_resources`` module is distributed as part of setuptools, so if
+you're using setuptools to distribute your package, there is no reason not to
+use its resource management API.  See also `Accessing Package Resources`_ for
+a quick example of converting code that uses ``__file__`` to use
+``pkg_resources`` instead.
+
+.. _Accessing Package Resources: http://peak.telecommunity.com/DevCenter/PythonEggs#accessing-package-resources
+
+
+Non-Package Data Files
+----------------------
+
+The ``distutils`` normally install general "data files" to a platform-specific
+location (e.g. ``/usr/share``).  This feature intended to be used for things
+like documentation, example configuration files, and the like.  ``setuptools``
+does not install these data files in a separate location, however.  They are
+bundled inside the egg file or directory, alongside the Python modules and
+packages.  The data files can also be accessed using the :ref:`ResourceManager
+API`, by specifying a ``Requirement`` instead of a package name::
+
+    from pkg_resources import Requirement, resource_filename
+    filename = resource_filename(Requirement.parse("MyProject"),"sample.conf")
+
+The above code will obtain the filename of the "sample.conf" file in the data
+root of the "MyProject" distribution.
+
+Note, by the way, that this encapsulation of data files means that you can't
+actually install data files to some arbitrary location on a user's machine;
+this is a feature, not a bug.  You can always include a script in your
+distribution that extracts and copies the documentation or data files to
+a user-specified location, at their discretion.  If you put related data files
+in a single directory, you can use ``resource_filename()`` with the directory
+name to get a filesystem directory that then can be copied with the ``shutil``
+module.  (Even if your package is installed as a zipfile, calling
+``resource_filename()`` on a directory will return an actual filesystem
+directory, whose contents will be that entire subtree of your distribution.)
+
+(Of course, if you're writing a new package, you can just as easily place your
+data files or directories inside one of your packages, rather than using the
+distutils' approach.  However, if you're updating an existing application, it
+may be simpler not to change the way it currently specifies these data files.)
+
+
+Automatic Resource Extraction
+-----------------------------
+
+If you are using tools that expect your resources to be "real" files, or your
+project includes non-extension native libraries or other files that your C
+extensions expect to be able to access, you may need to list those files in
+the ``eager_resources`` argument to ``setup()``, so that the files will be
+extracted together, whenever a C extension in the project is imported.
+
+This is especially important if your project includes shared libraries *other*
+than distutils-built C extensions, and those shared libraries use file
+extensions other than ``.dll``, ``.so``, or ``.dylib``, which are the
+extensions that setuptools 0.6a8 and higher automatically detects as shared
+libraries and adds to the ``native_libs.txt`` file for you.  Any shared
+libraries whose names do not end with one of those extensions should be listed
+as ``eager_resources``, because they need to be present in the filesystem when
+the C extensions that link to them are used.
+
+The ``pkg_resources`` runtime for compressed packages will automatically
+extract *all* C extensions and ``eager_resources`` at the same time, whenever
+*any* C extension or eager resource is requested via the ``resource_filename()``
+API.  (C extensions are imported using ``resource_filename()`` internally.)
+This ensures that C extensions will see all of the "real" files that they
+expect to see.
+
+Note also that you can list directory resource names in ``eager_resources`` as
+well, in which case the directory's contents (including subdirectories) will be
+extracted whenever any C extension or eager resource is requested.
+
+Please note that if you're not sure whether you need to use this argument, you
+don't!  It's really intended to support projects with lots of non-Python
+dependencies and as a last resort for crufty projects that can't otherwise
+handle being compressed.  If your package is pure Python, Python plus data
+files, or Python plus C, you really don't need this.  You've got to be using
+either C or an external program that needs "real" files in your project before
+there's any possibility of ``eager_resources`` being relevant to your project.
+
+
+Extensible Applications and Frameworks
+======================================
+
+
+.. _Entry Points:
+
+Dynamic Discovery of Services and Plugins
+-----------------------------------------
+
+``setuptools`` supports creating libraries that "plug in" to extensible
+applications and frameworks, by letting you register "entry points" in your
+project that can be imported by the application or framework.
+
+For example, suppose that a blogging tool wants to support plugins
+that provide translation for various file types to the blog's output format.
+The framework might define an "entry point group" called ``blogtool.parsers``,
+and then allow plugins to register entry points for the file extensions they
+support.
+
+This would allow people to create distributions that contain one or more
+parsers for different file types, and then the blogging tool would be able to
+find the parsers at runtime by looking up an entry point for the file
+extension (or mime type, or however it wants to).
+
+Note that if the blogging tool includes parsers for certain file formats, it
+can register these as entry points in its own setup script, which means it
+doesn't have to special-case its built-in formats.  They can just be treated
+the same as any other plugin's entry points would be.
+
+If you're creating a project that plugs in to an existing application or
+framework, you'll need to know what entry points or entry point groups are
+defined by that application or framework.  Then, you can register entry points
+in your setup script.  Here are a few examples of ways you might register an
+``.rst`` file parser entry point in the ``blogtool.parsers`` entry point group,
+for our hypothetical blogging tool::
+
+    setup(
+        # ...
+        entry_points={'blogtool.parsers': '.rst = some_module:SomeClass'}
+    )
+
+    setup(
+        # ...
+        entry_points={'blogtool.parsers': ['.rst = some_module:a_func']}
+    )
+
+    setup(
+        # ...
+        entry_points="""
+            [blogtool.parsers]
+            .rst = some.nested.module:SomeClass.some_classmethod [reST]
+        """,
+        extras_require=dict(reST="Docutils>=0.3.5")
+    )
+
+The ``entry_points`` argument to ``setup()`` accepts either a string with
+``.ini``-style sections, or a dictionary mapping entry point group names to
+either strings or lists of strings containing entry point specifiers.  An
+entry point specifier consists of a name and value, separated by an ``=``
+sign.  The value consists of a dotted module name, optionally followed by a
+``:`` and a dotted identifier naming an object within the module.  It can
+also include a bracketed list of "extras" that are required for the entry
+point to be used.  When the invoking application or framework requests loading
+of an entry point, any requirements implied by the associated extras will be
+passed to ``pkg_resources.require()``, so that an appropriate error message
+can be displayed if the needed package(s) are missing.  (Of course, the
+invoking app or framework can ignore such errors if it wants to make an entry
+point optional if a requirement isn't installed.)
+
+
+Defining Additional Metadata
+----------------------------
+
+Some extensible applications and frameworks may need to define their own kinds
+of metadata to include in eggs, which they can then access using the
+``pkg_resources`` metadata APIs.  Ordinarily, this is done by having plugin
+developers include additional files in their ``ProjectName.egg-info``
+directory.  However, since it can be tedious to create such files by hand, you
+may want to create a distutils extension that will create the necessary files
+from arguments to ``setup()``, in much the same way that ``setuptools`` does
+for many of the ``setup()`` arguments it adds.  See the section below on
+`Creating distutils Extensions`_ for more details, especially the subsection on
+`Adding new EGG-INFO Files`_.
+
+
+"Development Mode"
+==================
+
+Under normal circumstances, the ``distutils`` assume that you are going to
+build a distribution of your project, not use it in its "raw" or "unbuilt"
+form.  If you were to use the ``distutils`` that way, you would have to rebuild
+and reinstall your project every time you made a change to it during
+development.
+
+Another problem that sometimes comes up with the ``distutils`` is that you may
+need to do development on two related projects at the same time.  You may need
+to put both projects' packages in the same directory to run them, but need to
+keep them separate for revision control purposes.  How can you do this?
+
+Setuptools allows you to deploy your projects for use in a common directory or
+staging area, but without copying any files.  Thus, you can edit each project's
+code in its checkout directory, and only need to run build commands when you
+change a project's C extensions or similarly compiled files.  You can even
+deploy a project into another project's checkout directory, if that's your
+preferred way of working (as opposed to using a common independent staging area
+or the site-packages directory).
+
+To do this, use the ``setup.py develop`` command.  It works very similarly to
+``setup.py install`` or the EasyInstall tool, except that it doesn't actually
+install anything.  Instead, it creates a special ``.egg-link`` file in the
+deployment directory, that links to your project's source code.  And, if your
+deployment directory is Python's ``site-packages`` directory, it will also
+update the ``easy-install.pth`` file to include your project's source code,
+thereby making it available on ``sys.path`` for all programs using that Python
+installation.
+
+If you have enabled the ``use_2to3`` flag, then of course the ``.egg-link``
+will not link directly to your source code when run under Python 3, since
+that source code would be made for Python 2 and not work under Python 3.
+Instead the ``setup.py develop`` will build Python 3 code under the ``build``
+directory, and link there. This means that after doing code changes you will
+have to run ``setup.py build`` before these changes are picked up by your
+Python 3 installation.
+
+In addition, the ``develop`` command creates wrapper scripts in the target
+script directory that will run your in-development scripts after ensuring that
+all your ``install_requires`` packages are available on ``sys.path``.
+
+You can deploy the same project to multiple staging areas, e.g. if you have
+multiple projects on the same machine that are sharing the same project you're
+doing development work on.
+
+When you're done with a given development task, you can remove the project
+source from a staging area using ``setup.py develop --uninstall``, specifying
+the desired staging area if it's not the default.
+
+There are several options to control the precise behavior of the ``develop``
+command; see the section on the `develop`_ command below for more details.
+
+Note that you can also apply setuptools commands to non-setuptools projects,
+using commands like this::
+
+   python -c "import setuptools; execfile('setup.py')" develop
+
+That is, you can simply list the normal setup commands and options following
+the quoted part.
+
+
+Distributing a ``setuptools``-based project
+===========================================
+
+Using ``setuptools``...  Without bundling it!
+---------------------------------------------
+
+.. warning:: **ez_setup** is deprecated in favor of PIP with **PEP-518** support.
+
+Your users might not have ``setuptools`` installed on their machines, or even
+if they do, it might not be the right version.  Fixing this is easy; just
+download `ez_setup.py`_, and put it in the same directory as your ``setup.py``
+script.  (Be sure to add it to your revision control system, too.)  Then add
+these two lines to the very top of your setup script, before the script imports
+anything from setuptools:
+
+.. code-block:: python
+
+    import ez_setup
+    ez_setup.use_setuptools()
+
+That's it.  The ``ez_setup`` module will automatically download a matching
+version of ``setuptools`` from PyPI, if it isn't present on the target system.
+Whenever you install an updated version of setuptools, you should also update
+your projects' ``ez_setup.py`` files, so that a matching version gets installed
+on the target machine(s).
+
+By the way, setuptools supports the new PyPI "upload" command, so you can use
+``setup.py sdist upload`` or ``setup.py bdist_egg upload`` to upload your
+source or egg distributions respectively.  Your project's current version must
+be registered with PyPI first, of course; you can use ``setup.py register`` to
+do that.  Or you can do it all in one step, e.g. ``setup.py register sdist
+bdist_egg upload`` will register the package, build source and egg
+distributions, and then upload them both to PyPI, where they'll be easily
+found by other projects that depend on them.
+
+(By the way, if you need to distribute a specific version of ``setuptools``,
+you can specify the exact version and base download URL as parameters to the
+``use_setuptools()`` function.  See the function's docstring for details.)
+
+
+What Your Users Should Know
+---------------------------
+
+In general, a setuptools-based project looks just like any distutils-based
+project -- as long as your users have an internet connection and are installing
+to ``site-packages``, that is.  But for some users, these conditions don't
+apply, and they may become frustrated if this is their first encounter with
+a setuptools-based project.  To keep these users happy, you should review the
+following topics in your project's installation instructions, if they are
+relevant to your project and your target audience isn't already familiar with
+setuptools and ``easy_install``.
+
+Network Access
+    If your project is using ``ez_setup``, you should inform users of the
+    need to either have network access, or to preinstall the correct version of
+    setuptools using the `EasyInstall installation instructions`_.  Those
+    instructions also have tips for dealing with firewalls as well as how to
+    manually download and install setuptools.
+
+Custom Installation Locations
+    You should inform your users that if they are installing your project to
+    somewhere other than the main ``site-packages`` directory, they should
+    first install setuptools using the instructions for `Custom Installation
+    Locations`_, before installing your project.
+
+Your Project's Dependencies
+    If your project depends on other projects that may need to be downloaded
+    from PyPI or elsewhere, you should list them in your installation
+    instructions, or tell users how to find out what they are.  While most
+    users will not need this information, any users who don't have unrestricted
+    internet access may have to find, download, and install the other projects
+    manually.  (Note, however, that they must still install those projects
+    using ``easy_install``, or your project will not know they are installed,
+    and your setup script will try to download them again.)
+
+    If you want to be especially friendly to users with limited network access,
+    you may wish to build eggs for your project and its dependencies, making
+    them all available for download from your site, or at least create a page
+    with links to all of the needed eggs.  In this way, users with limited
+    network access can manually download all the eggs to a single directory,
+    then use the ``-f`` option of ``easy_install`` to specify the directory
+    to find eggs in.  Users who have full network access can just use ``-f``
+    with the URL of your download page, and ``easy_install`` will find all the
+    needed eggs using your links directly.  This is also useful when your
+    target audience isn't able to compile packages (e.g. most Windows users)
+    and your package or some of its dependencies include C code.
+
+Revision Control System Users and Co-Developers
+    Users and co-developers who are tracking your in-development code using
+    a revision control system should probably read this manual's sections
+    regarding such development.  Alternately, you may wish to create a
+    quick-reference guide containing the tips from this manual that apply to
+    your particular situation.  For example, if you recommend that people use
+    ``setup.py develop`` when tracking your in-development code, you should let
+    them know that this needs to be run after every update or commit.
+
+    Similarly, if you remove modules or data files from your project, you
+    should remind them to run ``setup.py clean --all`` and delete any obsolete
+    ``.pyc`` or ``.pyo`` files.  (This tip applies to the distutils in general, not
+    just setuptools, but not everybody knows about them; be kind to your users
+    by spelling out your project's best practices rather than leaving them
+    guessing.)
+
+Creating System Packages
+    Some users want to manage all Python packages using a single package
+    manager, and sometimes that package manager isn't ``easy_install``!
+    Setuptools currently supports ``bdist_rpm``, ``bdist_wininst``, and
+    ``bdist_dumb`` formats for system packaging.  If a user has a locally-
+    installed "bdist" packaging tool that internally uses the distutils
+    ``install`` command, it should be able to work with ``setuptools``.  Some
+    examples of "bdist" formats that this should work with include the
+    ``bdist_nsi`` and ``bdist_msi`` formats for Windows.
+
+    However, packaging tools that build binary distributions by running
+    ``setup.py install`` on the command line or as a subprocess will require
+    modification to work with setuptools.  They should use the
+    ``--single-version-externally-managed`` option to the ``install`` command,
+    combined with the standard ``--root`` or ``--record`` options.
+    See the `install command`_ documentation below for more details.  The
+    ``bdist_deb`` command is an example of a command that currently requires
+    this kind of patching to work with setuptools.
+
+    If you or your users have a problem building a usable system package for
+    your project, please report the problem via the mailing list so that
+    either the "bdist" tool in question or setuptools can be modified to
+    resolve the issue.
+
+
+Setting the ``zip_safe`` flag
+-----------------------------
+
+For some use cases (such as bundling as part of a larger application), Python
+packages may be run directly from a zip file.
+Not all packages, however, are capable of running in compressed form, because
+they may expect to be able to access either source code or data files as
+normal operating system files.  So, ``setuptools`` can install your project
+as a zipfile or a directory, and its default choice is determined by the
+project's ``zip_safe`` flag.
+
+You can pass a True or False value for the ``zip_safe`` argument to the
+``setup()`` function, or you can omit it.  If you omit it, the ``bdist_egg``
+command will analyze your project's contents to see if it can detect any
+conditions that would prevent it from working in a zipfile.  It will output
+notices to the console about any such conditions that it finds.
+
+Currently, this analysis is extremely conservative: it will consider the
+project unsafe if it contains any C extensions or datafiles whatsoever.  This
+does *not* mean that the project can't or won't work as a zipfile!  It just
+means that the ``bdist_egg`` authors aren't yet comfortable asserting that
+the project *will* work.  If the project contains no C or data files, and does
+no ``__file__`` or ``__path__`` introspection or source code manipulation, then
+there is an extremely solid chance the project will work when installed as a
+zipfile.  (And if the project uses ``pkg_resources`` for all its data file
+access, then C extensions and other data files shouldn't be a problem at all.
+See the `Accessing Data Files at Runtime`_ section above for more information.)
+
+However, if ``bdist_egg`` can't be *sure* that your package will work, but
+you've checked over all the warnings it issued, and you are either satisfied it
+*will* work (or if you want to try it for yourself), then you should set
+``zip_safe`` to ``True`` in your ``setup()`` call.  If it turns out that it
+doesn't work, you can always change it to ``False``, which will force
+``setuptools`` to install your project as a directory rather than as a zipfile.
+
+Of course, the end-user can still override either decision, if they are using
+EasyInstall to install your package.  And, if you want to override for testing
+purposes, you can just run ``setup.py easy_install --zip-ok .`` or ``setup.py
+easy_install --always-unzip .`` in your project directory to install the
+package as a zipfile or directory, respectively.
+
+In the future, as we gain more experience with different packages and become
+more satisfied with the robustness of the ``pkg_resources`` runtime, the
+"zip safety" analysis may become less conservative.  However, we strongly
+recommend that you determine for yourself whether your project functions
+correctly when installed as a zipfile, correct any problems if you can, and
+then make an explicit declaration of ``True`` or ``False`` for the ``zip_safe``
+flag, so that it will not be necessary for ``bdist_egg`` or ``EasyInstall`` to
+try to guess whether your project can work as a zipfile.
+
+
+Namespace Packages
+------------------
+
+Sometimes, a large package is more useful if distributed as a collection of
+smaller eggs.  However, Python does not normally allow the contents of a
+package to be retrieved from more than one location.  "Namespace packages"
+are a solution for this problem.  When you declare a package to be a namespace
+package, it means that the package has no meaningful contents in its
+``__init__.py``, and that it is merely a container for modules and subpackages.
+
+The ``pkg_resources`` runtime will then automatically ensure that the contents
+of namespace packages that are spread over multiple eggs or directories are
+combined into a single "virtual" package.
+
+The ``namespace_packages`` argument to ``setup()`` lets you declare your
+project's namespace packages, so that they will be included in your project's
+metadata.  The argument should list the namespace packages that the egg
+participates in.  For example, the ZopeInterface project might do this::
+
+    setup(
+        # ...
+        namespace_packages=['zope']
+    )
+
+because it contains a ``zope.interface`` package that lives in the ``zope``
+namespace package.  Similarly, a project for a standalone ``zope.publisher``
+would also declare the ``zope`` namespace package.  When these projects are
+installed and used, Python will see them both as part of a "virtual" ``zope``
+package, even though they will be installed in different locations.
+
+Namespace packages don't have to be top-level packages.  For example, Zope 3's
+``zope.app`` package is a namespace package, and in the future PEAK's
+``peak.util`` package will be too.
+
+Note, by the way, that your project's source tree must include the namespace
+packages' ``__init__.py`` files (and the ``__init__.py`` of any parent
+packages), in a normal Python package layout.  These ``__init__.py`` files
+*must* contain the line::
+
+    __import__('pkg_resources').declare_namespace(__name__)
+
+This code ensures that the namespace package machinery is operating and that
+the current package is registered as a namespace package.
+
+You must NOT include any other code and data in a namespace package's
+``__init__.py``.  Even though it may appear to work during development, or when
+projects are installed as ``.egg`` files, it will not work when the projects
+are installed using "system" packaging tools -- in such cases the
+``__init__.py`` files will not be installed, let alone executed.
+
+You must include the ``declare_namespace()``  line in the ``__init__.py`` of
+*every* project that has contents for the namespace package in question, in
+order to ensure that the namespace will be declared regardless of which
+project's copy of ``__init__.py`` is loaded first.  If the first loaded
+``__init__.py`` doesn't declare it, it will never *be* declared, because no
+other copies will ever be loaded!
+
+
+TRANSITIONAL NOTE
+~~~~~~~~~~~~~~~~~
+
+Setuptools automatically calls ``declare_namespace()`` for you at runtime,
+but future versions may *not*.  This is because the automatic declaration
+feature has some negative side effects, such as needing to import all namespace
+packages during the initialization of the ``pkg_resources`` runtime, and also
+the need for ``pkg_resources`` to be explicitly imported before any namespace
+packages work at all.  In some future releases, you'll be responsible
+for including your own declaration lines, and the automatic declaration feature
+will be dropped to get rid of the negative side effects.
+
+During the remainder of the current development cycle, therefore, setuptools
+will warn you about missing ``declare_namespace()`` calls in your
+``__init__.py`` files, and you should correct these as soon as possible
+before the compatibility support is removed.
+Namespace packages without declaration lines will not work
+correctly once a user has upgraded to a later version, so it's important that
+you make this change now in order to avoid having your code break in the field.
+Our apologies for the inconvenience, and thank you for your patience.
+
+
+
+Tagging and "Daily Build" or "Snapshot" Releases
+------------------------------------------------
+
+When a set of related projects are under development, it may be important to
+track finer-grained version increments than you would normally use for e.g.
+"stable" releases.  While stable releases might be measured in dotted numbers
+with alpha/beta/etc. status codes, development versions of a project often
+need to be tracked by revision or build number or even build date.  This is
+especially true when projects in development need to refer to one another, and
+therefore may literally need an up-to-the-minute version of something!
+
+To support these scenarios, ``setuptools`` allows you to "tag" your source and
+egg distributions by adding one or more of the following to the project's
+"official" version identifier:
+
+* A manually-specified pre-release tag, such as "build" or "dev", or a
+  manually-specified post-release tag, such as a build or revision number
+  (``--tag-build=STRING, -bSTRING``)
+
+* An 8-character representation of the build date (``--tag-date, -d``), as
+  a postrelease tag
+
+You can add these tags by adding ``egg_info`` and the desired options to
+the command line ahead of the ``sdist`` or ``bdist`` commands that you want
+to generate a daily build or snapshot for.  See the section below on the
+`egg_info`_ command for more details.
+
+(Also, before you release your project, be sure to see the section above on
+`Specifying Your Project's Version`_ for more information about how pre- and
+post-release tags affect how setuptools and EasyInstall interpret version
+numbers.  This is important in order to make sure that dependency processing
+tools will know which versions of your project are newer than others.)
+
+Finally, if you are creating builds frequently, and either building them in a
+downloadable location or are copying them to a distribution server, you should
+probably also check out the `rotate`_ command, which lets you automatically
+delete all but the N most-recently-modified distributions matching a glob
+pattern.  So, you can use a command line like::
+
+    setup.py egg_info -rbDEV bdist_egg rotate -m.egg -k3
+
+to build an egg whose version info includes 'DEV-rNNNN' (where NNNN is the
+most recent Subversion revision that affected the source tree), and then
+delete any egg files from the distribution directory except for the three
+that were built most recently.
+
+If you have to manage automated builds for multiple packages, each with
+different tagging and rotation policies, you may also want to check out the
+`alias`_ command, which would let each package define an alias like ``daily``
+that would perform the necessary tag, build, and rotate commands.  Then, a
+simpler script or cron job could just run ``setup.py daily`` in each project
+directory.  (And, you could also define sitewide or per-user default versions
+of the ``daily`` alias, so that projects that didn't define their own would
+use the appropriate defaults.)
+
+
+Generating Source Distributions
+-------------------------------
+
+``setuptools`` enhances the distutils' default algorithm for source file
+selection with pluggable endpoints for looking up files to include. If you are
+using a revision control system, and your source distributions only need to
+include files that you're tracking in revision control, use a corresponding
+plugin instead of writing a ``MANIFEST.in`` file. See the section below on
+`Adding Support for Revision Control Systems`_ for information on plugins.
+
+If you need to include automatically generated files, or files that are kept in
+an unsupported revision control system, you'll need to create a ``MANIFEST.in``
+file to specify any files that the default file location algorithm doesn't
+catch.  See the distutils documentation for more information on the format of
+the ``MANIFEST.in`` file.
+
+But, be sure to ignore any part of the distutils documentation that deals with
+``MANIFEST`` or how it's generated from ``MANIFEST.in``; setuptools shields you
+from these issues and doesn't work the same way in any case.  Unlike the
+distutils, setuptools regenerates the source distribution manifest file
+every time you build a source distribution, and it builds it inside the
+project's ``.egg-info`` directory, out of the way of your main project
+directory.  You therefore need not worry about whether it is up-to-date or not.
+
+Indeed, because setuptools' approach to determining the contents of a source
+distribution is so much simpler, its ``sdist`` command omits nearly all of
+the options that the distutils' more complex ``sdist`` process requires.  For
+all practical purposes, you'll probably use only the ``--formats`` option, if
+you use any option at all.
+
+
+Making your package available for EasyInstall
+---------------------------------------------
+
+If you use the ``register`` command (``setup.py register``) to register your
+package with PyPI, that's most of the battle right there.  (See the
+`docs for the register command`_ for more details.)
+
+.. _docs for the register command: http://docs.python.org/dist/package-index.html
+
+If you also use the `upload`_ command to upload actual distributions of your
+package, that's even better, because EasyInstall will be able to find and
+download them directly from your project's PyPI page.
+
+However, there may be reasons why you don't want to upload distributions to
+PyPI, and just want your existing distributions (or perhaps a Subversion
+checkout) to be used instead.
+
+So here's what you need to do before running the ``register`` command.  There
+are three ``setup()`` arguments that affect EasyInstall:
+
+``url`` and ``download_url``
+   These become links on your project's PyPI page.  EasyInstall will examine
+   them to see if they link to a package ("primary links"), or whether they are
+   HTML pages.  If they're HTML pages, EasyInstall scans all HREF's on the
+   page for primary links.
+
+``long_description``
+   EasyInstall will check any URLs contained in this argument to see if they
+   are primary links.
+
+A URL is considered a "primary link" if it is a link to a .tar.gz, .tgz, .zip,
+.egg, .egg.zip, .tar.bz2, or .exe file, or if it has an ``#egg=project`` or
+``#egg=project-version`` fragment identifier attached to it.  EasyInstall
+attempts to determine a project name and optional version number from the text
+of a primary link *without* downloading it.  When it has found all the primary
+links, EasyInstall will select the best match based on requested version,
+platform compatibility, and other criteria.
+
+So, if your ``url`` or ``download_url`` point either directly to a downloadable
+source distribution, or to HTML page(s) that have direct links to such, then
+EasyInstall will be able to locate downloads automatically.  If you want to
+make Subversion checkouts available, then you should create links with either
+``#egg=project`` or ``#egg=project-version`` added to the URL.  You should
+replace ``project`` and ``version`` with the values they would have in an egg
+filename.  (Be sure to actually generate an egg and then use the initial part
+of the filename, rather than trying to guess what the escaped form of the
+project name and version number will be.)
+
+Note that Subversion checkout links are of lower precedence than other kinds
+of distributions, so EasyInstall will not select a Subversion checkout for
+downloading unless it has a version included in the ``#egg=`` suffix, and
+it's a higher version than EasyInstall has seen in any other links for your
+project.
+
+As a result, it's a common practice to mark checkout URLs with a version of
+"dev" (i.e., ``#egg=projectname-dev``), so that users can do something like
+this::
+
+    easy_install --editable projectname==dev
+
+in order to check out the in-development version of ``projectname``.
+
+
+Making "Official" (Non-Snapshot) Releases
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When you make an official release, creating source or binary distributions,
+you will need to override the tag settings from ``setup.cfg``, so that you
+don't end up registering versions like ``foobar-0.7a1.dev-r34832``.  This is
+easy to do if you are developing on the trunk and using tags or branches for
+your releases - just make the change to ``setup.cfg`` after branching or
+tagging the release, so the trunk will still produce development snapshots.
+
+Alternately, if you are not branching for releases, you can override the
+default version options on the command line, using something like::
+
+    python setup.py egg_info -Db "" sdist bdist_egg register upload
+
+The first part of this command (``egg_info -Db ""``) will override the
+configured tag information, before creating source and binary eggs, registering
+the project with PyPI, and uploading the files.  Thus, these commands will use
+the plain version from your ``setup.py``, without adding the build designation
+string.
+
+Of course, if you will be doing this a lot, you may wish to create a personal
+alias for this operation, e.g.::
+
+    python setup.py alias -u release egg_info -Db ""
+
+You can then use it like this::
+
+    python setup.py release sdist bdist_egg register upload
+
+Or of course you can create more elaborate aliases that do all of the above.
+See the sections below on the `egg_info`_ and `alias`_ commands for more ideas.
+
+
+
+Distributing Extensions compiled with Pyrex
+-------------------------------------------
+
+``setuptools`` includes transparent support for building Pyrex extensions, as
+long as you define your extensions using ``setuptools.Extension``, *not*
+``distutils.Extension``.  You must also not import anything from Pyrex in
+your setup script.
+
+If you follow these rules, you can safely list ``.pyx`` files as the source
+of your ``Extension`` objects in the setup script.  ``setuptools`` will detect
+at build time whether Pyrex is installed or not.  If it is, then ``setuptools``
+will use it.  If not, then ``setuptools`` will silently change the
+``Extension`` objects to refer to the ``.c`` counterparts of the ``.pyx``
+files, so that the normal distutils C compilation process will occur.
+
+Of course, for this to work, your source distributions must include the C
+code generated by Pyrex, as well as your original ``.pyx`` files.  This means
+that you will probably want to include current ``.c`` files in your revision
+control system, rebuilding them whenever you check changes in for the ``.pyx``
+source files.  This will ensure that people tracking your project in a revision
+control system will be able to build it even if they don't have Pyrex
+installed, and that your source releases will be similarly usable with or
+without Pyrex.
+
+
+-----------------
+Command Reference
+-----------------
+
+.. _alias:
+
+``alias`` - Define shortcuts for commonly used commands
+=======================================================
+
+Sometimes, you need to use the same commands over and over, but you can't
+necessarily set them as defaults.  For example, if you produce both development
+snapshot releases and "stable" releases of a project, you may want to put
+the distributions in different places, or use different ``egg_info`` tagging
+options, etc.  In these cases, it doesn't make sense to set the options in
+a distutils configuration file, because the values of the options change based
+on what you're trying to do.
+
+Setuptools therefore allows you to define "aliases" - shortcut names for
+an arbitrary string of commands and options, using ``setup.py alias aliasname
+expansion``, where aliasname is the name of the new alias, and the remainder of
+the command line supplies its expansion.  For example, this command defines
+a sitewide alias called "daily", that sets various ``egg_info`` tagging
+options::
+
+    setup.py alias --global-config daily egg_info --tag-build=development
+
+Once the alias is defined, it can then be used with other setup commands,
+e.g.::
+
+    setup.py daily bdist_egg        # generate a daily-build .egg file
+    setup.py daily sdist            # generate a daily-build source distro
+    setup.py daily sdist bdist_egg  # generate both
+
+The above commands are interpreted as if the word ``daily`` were replaced with
+``egg_info --tag-build=development``.
+
+Note that setuptools will expand each alias *at most once* in a given command
+line.  This serves two purposes.  First, if you accidentally create an alias
+loop, it will have no effect; you'll instead get an error message about an
+unknown command.  Second, it allows you to define an alias for a command, that
+uses that command.  For example, this (project-local) alias::
+
+    setup.py alias bdist_egg bdist_egg rotate -k1 -m.egg
+
+redefines the ``bdist_egg`` command so that it always runs the ``rotate``
+command afterwards to delete all but the newest egg file.  It doesn't loop
+indefinitely on ``bdist_egg`` because the alias is only expanded once when
+used.
+
+You can remove a defined alias with the ``--remove`` (or ``-r``) option, e.g.::
+
+    setup.py alias --global-config --remove daily
+
+would delete the "daily" alias we defined above.
+
+Aliases can be defined on a project-specific, per-user, or sitewide basis.  The
+default is to define or remove a project-specific alias, but you can use any of
+the `configuration file options`_ (listed under the `saveopts`_ command, below)
+to determine which distutils configuration file the alias will be added to
+(or removed from).
+
+Note that if you omit the "expansion" argument to the ``alias`` command,
+you'll get output showing that alias' current definition (and what
+configuration file it's defined in).  If you omit the alias name as well,
+you'll get a listing of all current aliases along with their configuration
+file locations.
+
+
+``bdist_egg`` - Create a Python Egg for the project
+===================================================
+
+This command generates a Python Egg (``.egg`` file) for the project.  Python
+Eggs are the preferred binary distribution format for EasyInstall, because they
+are cross-platform (for "pure" packages), directly importable, and contain
+project metadata including scripts and information about the project's
+dependencies.  They can be simply downloaded and added to ``sys.path``
+directly, or they can be placed in a directory on ``sys.path`` and then
+automatically discovered by the egg runtime system.
+
+This command runs the `egg_info`_ command (if it hasn't already run) to update
+the project's metadata (``.egg-info``) directory.  If you have added any extra
+metadata files to the ``.egg-info`` directory, those files will be included in
+the new egg file's metadata directory, for use by the egg runtime system or by
+any applications or frameworks that use that metadata.
+
+You won't usually need to specify any special options for this command; just
+use ``bdist_egg`` and you're done.  But there are a few options that may
+be occasionally useful:
+
+``--dist-dir=DIR, -d DIR``
+    Set the directory where the ``.egg`` file will be placed.  If you don't
+    supply this, then the ``--dist-dir`` setting of the ``bdist`` command
+    will be used, which is usually a directory named ``dist`` in the project
+    directory.
+
+``--plat-name=PLATFORM, -p PLATFORM``
+    Set the platform name string that will be embedded in the egg's filename
+    (assuming the egg contains C extensions).  This can be used to override
+    the distutils default platform name with something more meaningful.  Keep
+    in mind, however, that the egg runtime system expects to see eggs with
+    distutils platform names, so it may ignore or reject eggs with non-standard
+    platform names.  Similarly, the EasyInstall program may ignore them when
+    searching web pages for download links.  However, if you are
+    cross-compiling or doing some other unusual things, you might find a use
+    for this option.
+
+``--exclude-source-files``
+    Don't include any modules' ``.py`` files in the egg, just compiled Python,
+    C, and data files.  (Note that this doesn't affect any ``.py`` files in the
+    EGG-INFO directory or its subdirectories, since for example there may be
+    scripts with a ``.py`` extension which must still be retained.)  We don't
+    recommend that you use this option except for packages that are being
+    bundled for proprietary end-user applications, or for "embedded" scenarios
+    where space is at an absolute premium.  On the other hand, if your package
+    is going to be installed and used in compressed form, you might as well
+    exclude the source because Python's ``traceback`` module doesn't currently
+    understand how to display zipped source code anyway, or how to deal with
+    files that are in a different place from where their code was compiled.
+
+There are also some options you will probably never need, but which are there
+because they were copied from similar ``bdist`` commands used as an example for
+creating this one.  They may be useful for testing and debugging, however,
+which is why we kept them:
+
+``--keep-temp, -k``
+    Keep the contents of the ``--bdist-dir`` tree around after creating the
+    ``.egg`` file.
+
+``--bdist-dir=DIR, -b DIR``
+    Set the temporary directory for creating the distribution.  The entire
+    contents of this directory are zipped to create the ``.egg`` file, after
+    running various installation commands to copy the package's modules, data,
+    and extensions here.
+
+``--skip-build``
+    Skip doing any "build" commands; just go straight to the
+    install-and-compress phases.
+
+
+.. _develop:
+
+``develop`` - Deploy the project source in "Development Mode"
+=============================================================
+
+This command allows you to deploy your project's source for use in one or more
+"staging areas" where it will be available for importing.  This deployment is
+done in such a way that changes to the project source are immediately available
+in the staging area(s), without needing to run a build or install step after
+each change.
+
+The ``develop`` command works by creating an ``.egg-link`` file (named for the
+project) in the given staging area.  If the staging area is Python's
+``site-packages`` directory, it also updates an ``easy-install.pth`` file so
+that the project is on ``sys.path`` by default for all programs run using that
+Python installation.
+
+The ``develop`` command also installs wrapper scripts in the staging area (or
+a separate directory, as specified) that will ensure the project's dependencies
+are available on ``sys.path`` before running the project's source scripts.
+And, it ensures that any missing project dependencies are available in the
+staging area, by downloading and installing them if necessary.
+
+Last, but not least, the ``develop`` command invokes the ``build_ext -i``
+command to ensure any C extensions in the project have been built and are
+up-to-date, and the ``egg_info`` command to ensure the project's metadata is
+updated (so that the runtime and wrappers know what the project's dependencies
+are).  If you make any changes to the project's setup script or C extensions,
+you should rerun the ``develop`` command against all relevant staging areas to
+keep the project's scripts, metadata and extensions up-to-date.  Most other
+kinds of changes to your project should not require any build operations or
+rerunning ``develop``, but keep in mind that even minor changes to the setup
+script (e.g. changing an entry point definition) require you to re-run the
+``develop`` or ``test`` commands to keep the distribution updated.
+
+Here are some of the options that the ``develop`` command accepts.  Note that
+they affect the project's dependencies as well as the project itself, so if you
+have dependencies that need to be installed and you use ``--exclude-scripts``
+(for example), the dependencies' scripts will not be installed either!  For
+this reason, you may want to use EasyInstall to install the project's
+dependencies before using the ``develop`` command, if you need finer control
+over the installation options for dependencies.
+
+``--uninstall, -u``
+    Un-deploy the current project.  You may use the ``--install-dir`` or ``-d``
+    option to designate the staging area.  The created ``.egg-link`` file will
+    be removed, if present and it is still pointing to the project directory.
+    The project directory will be removed from ``easy-install.pth`` if the
+    staging area is Python's ``site-packages`` directory.
+
+    Note that this option currently does *not* uninstall script wrappers!  You
+    must uninstall them yourself, or overwrite them by using EasyInstall to
+    activate a different version of the package.  You can also avoid installing
+    script wrappers in the first place, if you use the ``--exclude-scripts``
+    (aka ``-x``) option when you run ``develop`` to deploy the project.
+
+``--multi-version, -m``
+    "Multi-version" mode. Specifying this option prevents ``develop`` from
+    adding an ``easy-install.pth`` entry for the project(s) being deployed, and
+    if an entry for any version of a project already exists, the entry will be
+    removed upon successful deployment.  In multi-version mode, no specific
+    version of the package is available for importing, unless you use
+    ``pkg_resources.require()`` to put it on ``sys.path``, or you are running
+    a wrapper script generated by ``setuptools`` or EasyInstall.  (In which
+    case the wrapper script calls ``require()`` for you.)
+
+    Note that if you install to a directory other than ``site-packages``,
+    this option is automatically in effect, because ``.pth`` files can only be
+    used in ``site-packages`` (at least in Python 2.3 and 2.4). So, if you use
+    the ``--install-dir`` or ``-d`` option (or they are set via configuration
+    file(s)) your project and its dependencies will be deployed in multi-
+    version mode.
+
+``--install-dir=DIR, -d DIR``
+    Set the installation directory (staging area).  If this option is not
+    directly specified on the command line or in a distutils configuration
+    file, the distutils default installation location is used.  Normally, this
+    will be the ``site-packages`` directory, but if you are using distutils
+    configuration files, setting things like ``prefix`` or ``install_lib``,
+    then those settings are taken into account when computing the default
+    staging area.
+
+``--script-dir=DIR, -s DIR``
+    Set the script installation directory.  If you don't supply this option
+    (via the command line or a configuration file), but you *have* supplied
+    an ``--install-dir`` (via command line or config file), then this option
+    defaults to the same directory, so that the scripts will be able to find
+    their associated package installation.  Otherwise, this setting defaults
+    to the location where the distutils would normally install scripts, taking
+    any distutils configuration file settings into account.
+
+``--exclude-scripts, -x``
+    Don't deploy script wrappers.  This is useful if you don't want to disturb
+    existing versions of the scripts in the staging area.
+
+``--always-copy, -a``
+    Copy all needed distributions to the staging area, even if they
+    are already present in another directory on ``sys.path``.  By default, if
+    a requirement can be met using a distribution that is already available in
+    a directory on ``sys.path``, it will not be copied to the staging area.
+
+``--egg-path=DIR``
+    Force the generated ``.egg-link`` file to use a specified relative path
+    to the source directory.  This can be useful in circumstances where your
+    installation directory is being shared by code running under multiple
+    platforms (e.g. Mac and Windows) which have different absolute locations
+    for the code under development, but the same *relative* locations with
+    respect to the installation directory.  If you use this option when
+    installing, you must supply the same relative path when uninstalling.
+
+In addition to the above options, the ``develop`` command also accepts all of
+the same options accepted by ``easy_install``.  If you've configured any
+``easy_install`` settings in your ``setup.cfg`` (or other distutils config
+files), the ``develop`` command will use them as defaults, unless you override
+them in a ``[develop]`` section or on the command line.
+
+
+``easy_install`` - Find and install packages
+============================================
+
+This command runs the `EasyInstall tool
+<easy_install.html>`_ for you.  It is exactly
+equivalent to running the ``easy_install`` command.  All command line arguments
+following this command are consumed and not processed further by the distutils,
+so this must be the last command listed on the command line.  Please see
+the EasyInstall documentation for the options reference and usage examples.
+Normally, there is no reason to use this command via the command line, as you
+can just use ``easy_install`` directly.  It's only listed here so that you know
+it's a distutils command, which means that you can:
+
+* create command aliases that use it,
+* create distutils extensions that invoke it as a subcommand, and
+* configure options for it in your ``setup.cfg`` or other distutils config
+  files.
+
+
+.. _egg_info:
+
+``egg_info`` - Create egg metadata and set build tags
+=====================================================
+
+This command performs two operations: it updates a project's ``.egg-info``
+metadata directory (used by the ``bdist_egg``, ``develop``, and ``test``
+commands), and it allows you to temporarily change a project's version string,
+to support "daily builds" or "snapshot" releases.  It is run automatically by
+the ``sdist``, ``bdist_egg``, ``develop``, ``register``, and ``test`` commands
+in order to update the project's metadata, but you can also specify it
+explicitly in order to temporarily change the project's version string while
+executing other commands.  (It also generates the ``.egg-info/SOURCES.txt``
+manifest file, which is used when you are building source distributions.)
+
+In addition to writing the core egg metadata defined by ``setuptools`` and
+required by ``pkg_resources``, this command can be extended to write other
+metadata files as well, by defining entry points in the ``egg_info.writers``
+group.  See the section on `Adding new EGG-INFO Files`_ below for more details.
+Note that using additional metadata writers may require you to include a
+``setup_requires`` argument to ``setup()`` in order to ensure that the desired
+writers are available on ``sys.path``.
+
+
+Release Tagging Options
+-----------------------
+
+The following options can be used to modify the project's version string for
+all remaining commands on the setup command line.  The options are processed
+in the order shown, so if you use more than one, the requested tags will be
+added in the following order:
+
+``--tag-build=NAME, -b NAME``
+    Append NAME to the project's version string.  Due to the way setuptools
+    processes "pre-release" version suffixes beginning with the letters "a"
+    through "e" (like "alpha", "beta", and "candidate"), you will usually want
+    to use a tag like ".build" or ".dev", as this will cause the version number
+    to be considered *lower* than the project's default version.  (If you
+    want to make the version number *higher* than the default version, you can
+    always leave off --tag-build and then use one or both of the following
+    options.)
+
+    If you have a default build tag set in your ``setup.cfg``, you can suppress
+    it on the command line using ``-b ""`` or ``--tag-build=""`` as an argument
+    to the ``egg_info`` command.
+
+``--tag-date, -d``
+    Add a date stamp of the form "-YYYYMMDD" (e.g. "-20050528") to the
+    project's version number.
+
+``--no-date, -D``
+    Don't include a date stamp in the version number.  This option is included
+    so you can override a default setting in ``setup.cfg``.
+
+
+(Note: Because these options modify the version number used for source and
+binary distributions of your project, you should first make sure that you know
+how the resulting version numbers will be interpreted by automated tools
+like EasyInstall.  See the section above on `Specifying Your Project's
+Version`_ for an explanation of pre- and post-release tags, as well as tips on
+how to choose and verify a versioning scheme for your project.)
+
+For advanced uses, there is one other option that can be set, to change the
+location of the project's ``.egg-info`` directory.  Commands that need to find
+the project's source directory or metadata should get it from this setting:
+
+
+Other ``egg_info`` Options
+--------------------------
+
+``--egg-base=SOURCEDIR, -e SOURCEDIR``
+    Specify the directory that should contain the .egg-info directory.  This
+    should normally be the root of your project's source tree (which is not
+    necessarily the same as your project directory; some projects use a ``src``
+    or ``lib`` subdirectory as the source root).  You should not normally need
+    to specify this directory, as it is normally determined from the
+    ``package_dir`` argument to the ``setup()`` function, if any.  If there is
+    no ``package_dir`` set, this option defaults to the current directory.
+
+
+``egg_info`` Examples
+---------------------
+
+Creating a dated "nightly build" snapshot egg::
+
+    python setup.py egg_info --tag-date --tag-build=DEV bdist_egg
+
+Creating and uploading a release with no version tags, even if some default
+tags are specified in ``setup.cfg``::
+
+    python setup.py egg_info -Db "" sdist bdist_egg register upload
+
+(Notice that ``egg_info`` must always appear on the command line *before* any
+commands that you want the version changes to apply to.)
+
+
+.. _install command:
+
+``install`` - Run ``easy_install`` or old-style installation
+============================================================
+
+The setuptools ``install`` command is basically a shortcut to run the
+``easy_install`` command on the current project.  However, for convenience
+in creating "system packages" of setuptools-based projects, you can also
+use this option:
+
+``--single-version-externally-managed``
+    This boolean option tells the ``install`` command to perform an "old style"
+    installation, with the addition of an ``.egg-info`` directory so that the
+    installed project will still have its metadata available and operate
+    normally.  If you use this option, you *must* also specify the ``--root``
+    or ``--record`` options (or both), because otherwise you will have no way
+    to identify and remove the installed files.
+
+This option is automatically in effect when ``install`` is invoked by another
+distutils command, so that commands like ``bdist_wininst`` and ``bdist_rpm``
+will create system packages of eggs.  It is also automatically in effect if
+you specify the ``--root`` option.
+
+
+``install_egg_info`` - Install an ``.egg-info`` directory in ``site-packages``
+==============================================================================
+
+Setuptools runs this command as part of ``install`` operations that use the
+``--single-version-externally-managed`` options.  You should not invoke it
+directly; it is documented here for completeness and so that distutils
+extensions such as system package builders can make use of it.  This command
+has only one option:
+
+``--install-dir=DIR, -d DIR``
+    The parent directory where the ``.egg-info`` directory will be placed.
+    Defaults to the same as the ``--install-dir`` option specified for the
+    ``install_lib`` command, which is usually the system ``site-packages``
+    directory.
+
+This command assumes that the ``egg_info`` command has been given valid options
+via the command line or ``setup.cfg``, as it will invoke the ``egg_info``
+command and use its options to locate the project's source ``.egg-info``
+directory.
+
+
+.. _rotate:
+
+``rotate`` - Delete outdated distribution files
+===============================================
+
+As you develop new versions of your project, your distribution (``dist``)
+directory will gradually fill up with older source and/or binary distribution
+files.  The ``rotate`` command lets you automatically clean these up, keeping
+only the N most-recently modified files matching a given pattern.
+
+``--match=PATTERNLIST, -m PATTERNLIST``
+    Comma-separated list of glob patterns to match.  This option is *required*.
+    The project name and ``-*`` is prepended to the supplied patterns, in order
+    to match only distributions belonging to the current project (in case you
+    have a shared distribution directory for multiple projects).  Typically,
+    you will use a glob pattern like ``.zip`` or ``.egg`` to match files of
+    the specified type.  Note that each supplied pattern is treated as a
+    distinct group of files for purposes of selecting files to delete.
+
+``--keep=COUNT, -k COUNT``
+    Number of matching distributions to keep.  For each group of files
+    identified by a pattern specified with the ``--match`` option, delete all
+    but the COUNT most-recently-modified files in that group.  This option is
+    *required*.
+
+``--dist-dir=DIR, -d DIR``
+    Directory where the distributions are.  This defaults to the value of the
+    ``bdist`` command's ``--dist-dir`` option, which will usually be the
+    project's ``dist`` subdirectory.
+
+**Example 1**: Delete all .tar.gz files from the distribution directory, except
+for the 3 most recently modified ones::
+
+    setup.py rotate --match=.tar.gz --keep=3
+
+**Example 2**: Delete all Python 2.3 or Python 2.4 eggs from the distribution
+directory, except the most recently modified one for each Python version::
+
+    setup.py rotate --match=-py2.3*.egg,-py2.4*.egg --keep=1
+
+
+.. _saveopts:
+
+``saveopts`` - Save used options to a configuration file
+========================================================
+
+Finding and editing ``distutils`` configuration files can be a pain, especially
+since you also have to translate the configuration options from command-line
+form to the proper configuration file format.  You can avoid these hassles by
+using the ``saveopts`` command.  Just add it to the command line to save the
+options you used.  For example, this command builds the project using
+the ``mingw32`` C compiler, then saves the --compiler setting as the default
+for future builds (even those run implicitly by the ``install`` command)::
+
+    setup.py build --compiler=mingw32 saveopts
+
+The ``saveopts`` command saves all options for every command specified on the
+command line to the project's local ``setup.cfg`` file, unless you use one of
+the `configuration file options`_ to change where the options are saved.  For
+example, this command does the same as above, but saves the compiler setting
+to the site-wide (global) distutils configuration::
+
+    setup.py build --compiler=mingw32 saveopts -g
+
+Note that it doesn't matter where you place the ``saveopts`` command on the
+command line; it will still save all the options specified for all commands.
+For example, this is another valid way to spell the last example::
+
+    setup.py saveopts -g build --compiler=mingw32
+
+Note, however, that all of the commands specified are always run, regardless of
+where ``saveopts`` is placed on the command line.
+
+
+Configuration File Options
+--------------------------
+
+Normally, settings such as options and aliases are saved to the project's
+local ``setup.cfg`` file.  But you can override this and save them to the
+global or per-user configuration files, or to a manually-specified filename.
+
+``--global-config, -g``
+    Save settings to the global ``distutils.cfg`` file inside the ``distutils``
+    package directory.  You must have write access to that directory to use
+    this option.  You also can't combine this option with ``-u`` or ``-f``.
+
+``--user-config, -u``
+    Save settings to the current user's ``~/.pydistutils.cfg`` (POSIX) or
+    ``$HOME/pydistutils.cfg`` (Windows) file.  You can't combine this option
+    with ``-g`` or ``-f``.
+
+``--filename=FILENAME, -f FILENAME``
+    Save settings to the specified configuration file.  You can't
+    combine this option with ``-g`` or ``-u``.  Note that if you specify a
+    non-standard filename, the ``distutils`` and ``setuptools`` will not
+    use the file's contents.  This option is mainly included for use in
+    testing.
+
+These options are used by other ``setuptools`` commands that modify
+configuration files, such as the `alias`_ and `setopt`_ commands.
+
+
+.. _setopt:
+
+``setopt`` - Set a distutils or setuptools option in a config file
+==================================================================
+
+This command is mainly for use by scripts, but it can also be used as a quick
+and dirty way to change a distutils configuration option without having to
+remember what file the options are in and then open an editor.
+
+**Example 1**.  Set the default C compiler to ``mingw32`` (using long option
+names)::
+
+    setup.py setopt --command=build --option=compiler --set-value=mingw32
+
+**Example 2**.  Remove any setting for the distutils default package
+installation directory (short option names)::
+
+    setup.py setopt -c install -o install_lib -r
+
+
+Options for the ``setopt`` command:
+
+``--command=COMMAND, -c COMMAND``
+    Command to set the option for.  This option is required.
+
+``--option=OPTION, -o OPTION``
+    The name of the option to set.  This option is required.
+
+``--set-value=VALUE, -s VALUE``
+    The value to set the option to.  Not needed if ``-r`` or ``--remove`` is
+    set.
+
+``--remove, -r``
+    Remove (unset) the option, instead of setting it.
+
+In addition to the above options, you may use any of the `configuration file
+options`_ (listed under the `saveopts`_ command, above) to determine which
+distutils configuration file the option will be added to (or removed from).
+
+
+.. _test:
+
+``test`` - Build package and run a unittest suite
+=================================================
+
+When doing test-driven development, or running automated builds that need
+testing before they are deployed for downloading or use, it's often useful
+to be able to run a project's unit tests without actually deploying the project
+anywhere, even using the ``develop`` command.  The ``test`` command runs a
+project's unit tests without actually deploying it, by temporarily putting the
+project's source on ``sys.path``, after first running ``build_ext -i`` and
+``egg_info`` to ensure that any C extensions and project metadata are
+up-to-date.
+
+To use this command, your project's tests must be wrapped in a ``unittest``
+test suite by either a function, a ``TestCase`` class or method, or a module
+or package containing ``TestCase`` classes.  If the named suite is a module,
+and the module has an ``additional_tests()`` function, it is called and the
+result (which must be a ``unittest.TestSuite``) is added to the tests to be
+run.  If the named suite is a package, any submodules and subpackages are
+recursively added to the overall test suite.  (Note: if your project specifies
+a ``test_loader``, the rules for processing the chosen ``test_suite`` may
+differ; see the `test_loader`_ documentation for more details.)
+
+Note that many test systems including ``doctest`` support wrapping their
+non-``unittest`` tests in ``TestSuite`` objects.  So, if you are using a test
+package that does not support this, we suggest you encourage its developers to
+implement test suite support, as this is a convenient and standard way to
+aggregate a collection of tests to be run under a common test harness.
+
+By default, tests will be run in the "verbose" mode of the ``unittest``
+package's text test runner, but you can get the "quiet" mode (just dots) if
+you supply the ``-q`` or ``--quiet`` option, either as a global option to
+the setup script (e.g. ``setup.py -q test``) or as an option for the ``test``
+command itself (e.g. ``setup.py test -q``).  There is one other option
+available:
+
+``--test-suite=NAME, -s NAME``
+    Specify the test suite (or module, class, or method) to be run
+    (e.g. ``some_module.test_suite``).  The default for this option can be
+    set by giving a ``test_suite`` argument to the ``setup()`` function, e.g.::
+
+        setup(
+            # ...
+            test_suite="my_package.tests.test_all"
+        )
+
+    If you did not set a ``test_suite`` in your ``setup()`` call, and do not
+    provide a ``--test-suite`` option, an error will occur.
+
+
+.. _upload:
+
+``upload`` - Upload source and/or egg distributions to PyPI
+===========================================================
+
+The ``upload`` command is implemented and `documented
+<https://docs.python.org/3.1/distutils/uploading.html>`_
+in distutils.
+
+Setuptools augments the ``upload`` command with support
+for `keyring <https://pypi.org/project/keyring/>`_,
+allowing the password to be stored in a secure
+location and not in plaintext in the .pypirc file. To use
+keyring, first install keyring and set the password for
+the relevant repository, e.g.::
+
+    python -m keyring set <repository> <username>
+    Password for '<username>' in '<repository>': ********
+
+Then, in .pypirc, set the repository configuration as normal,
+but omit the password. Thereafter, uploads will use the
+password from the keyring.
+
+New in 20.1: Added keyring support.
+
+
+-----------------------------------------
+Configuring setup() using setup.cfg files
+-----------------------------------------
+
+.. note:: New in 30.3.0 (8 Dec 2016).
+
+.. important::
+    A ``setup.py`` file containing a ``setup()`` function call is still
+    required even if your configuration resides in ``setup.cfg``.
+
+``Setuptools`` allows using configuration files (usually :file:`setup.cfg`)
+to define a package’s metadata and other options that are normally supplied
+to the ``setup()`` function.
+
+This approach not only allows automation scenarios but also reduces
+boilerplate code in some cases.
+
+.. note::
+
+    This implementation has limited compatibility with the distutils2-like
+    ``setup.cfg`` sections used by the ``pbr`` and ``d2to1`` packages.
+
+    Namely: only metadata-related keys from ``metadata`` section are supported
+    (except for ``description-file``); keys from ``files``, ``entry_points``
+    and ``backwards_compat`` are not supported.
+
+
+.. code-block:: ini
+
+    [metadata]
+    name = my_package
+    version = attr: src.VERSION
+    description = My package description
+    long_description = file: README.rst, CHANGELOG.rst, LICENSE.rst
+    keywords = one, two
+    license = BSD 3-Clause License
+    classifiers =
+        Framework :: Django
+        Programming Language :: Python :: 3
+        Programming Language :: Python :: 3.5
+
+    [options]
+    zip_safe = False
+    include_package_data = True
+    packages = find:
+    scripts =
+      bin/first.py
+      bin/second.py
+
+    [options.package_data]
+    * = *.txt, *.rst
+    hello = *.msg
+
+    [options.extras_require]
+    pdf = ReportLab>=1.2; RXP
+    rest = docutils>=0.3; pack ==1.1, ==1.3
+
+    [options.packages.find]
+    exclude =
+        src.subpackage1
+        src.subpackage2
+
+
+Metadata and options are set in the config sections of the same name.
+
+* Keys are the same as the keyword arguments one provides to the ``setup()``
+  function.
+
+* Complex values can be written comma-separated or placed one per line
+  in *dangling* config values. The following are equivalent:
+
+  .. code-block:: ini
+
+      [metadata]
+      keywords = one, two
+
+      [metadata]
+      keywords =
+        one
+        two
+
+* In some cases, complex values can be provided in dedicated subsections for
+  clarity.
+
+* Some keys allow ``file:``, ``attr:``, and ``find:`` directives in order to
+  cover common usecases.
+
+* Unknown keys are ignored.
+
+
+Specifying values
+=================
+
+Some values are treated as simple strings, some allow more logic.
+
+Type names used below:
+
+* ``str`` - simple string
+* ``list-comma`` - dangling list or string of comma-separated values
+* ``list-semi`` - dangling list or string of semicolon-separated values
+* ``bool`` - ``True`` if the value is one of ``1``, ``yes``, or ``true``
+* ``dict`` - list-comma where keys are separated from values by ``=``
+* ``section`` - values are read from a dedicated (sub)section
+
+
+Special directives:
+
+* ``attr:`` - Value is read from a module attribute.  ``attr:`` supports
+  callables and iterables; unsupported types are cast using ``str()``.
+* ``file:`` - Value is read from a list of files and then concatenated
+
+
+.. note::
+    The ``file:`` directive is sandboxed and won't reach anything outside
+    the directory containing ``setup.py``.
+
+
+Metadata
+--------
+
+.. note::
+    The aliases given below are supported for compatibility reasons,
+    but their use is not advised.
+
+==============================  =================  =====
+Key                             Aliases            Type
+==============================  =================  =====
+name                                               str
+version                                            attr:, str
+url                             home-page          str
+download_url                    download-url       str
+project_urls                                       dict
+author                                             str
+author_email                    author-email       str
+maintainer                                         str
+maintainer_email                maintainer-email   str
+classifiers                     classifier         file:, list-comma
+license                                            file:, str
+description                     summary            file:, str
+long_description                long-description   file:, str
+long_description_content_type                      str
+keywords                                           list-comma
+platforms                       platform           list-comma
+provides                                           list-comma
+requires                                           list-comma
+obsoletes                                          list-comma
+==============================  =================  =====
+
+
+Options
+-------
+
+=======================  =====
+Key                      Type
+=======================  =====
+zip_safe                 bool
+setup_requires           list-semi
+install_requires         list-semi
+extras_require           section
+python_requires          str
+entry_points             file:, section
+use_2to3                 bool
+use_2to3_fixers          list-comma
+use_2to3_exclude_fixers  list-comma
+convert_2to3_doctests    list-comma
+scripts                  list-comma
+eager_resources          list-comma
+dependency_links         list-comma
+tests_require            list-semi
+include_package_data     bool
+packages                 find:, list-comma
+package_dir              dict
+package_data             section
+exclude_package_data     section
+namespace_packages       list-comma
+py_modules               list-comma
+=======================  =====
+
+.. note::
+
+    **packages** - The ``find:`` directive can be further configured
+    in a dedicated subsection ``options.packages.find``. This subsection
+    accepts the same keys as the ``setuptools.find_packages`` function:
+    ``where``, ``include``, and ``exclude``.
+
+
+Configuration API
+=================
+
+Some automation tools may wish to access data from a configuration file.
+
+``Setuptools`` exposes a ``read_configuration()`` function for
+parsing ``metadata`` and ``options`` sections into a dictionary.
+
+
+.. code-block:: python
+
+    from setuptools.config import read_configuration
+
+    conf_dict = read_configuration('/home/user/dev/package/setup.cfg')
+
+
+By default, ``read_configuration()`` will read only the file provided
+in the first argument. To include values from other configuration files
+which could be in various places, set the ``find_others`` keyword argument
+to ``True``.
+
+If you have only a configuration file but not the whole package, you can still
+try to get data out of it with the help of the ``ignore_option_errors`` keyword
+argument. When it is set to ``True``, all options with errors possibly produced
+by directives, such as ``attr:`` and others, will be silently ignored.
+As a consequence, the resulting dictionary will include no such options.
+
+
+--------------------------------
+Extending and Reusing Setuptools
+--------------------------------
+
+Creating ``distutils`` Extensions
+=================================
+
+It can be hard to add new commands or setup arguments to the distutils.  But
+the ``setuptools`` package makes it a bit easier, by allowing you to distribute
+a distutils extension as a separate project, and then have projects that need
+the extension just refer to it in their ``setup_requires`` argument.
+
+With ``setuptools``, your distutils extension projects can hook in new
+commands and ``setup()`` arguments just by defining "entry points".  These
+are mappings from command or argument names to a specification of where to
+import a handler from.  (See the section on `Dynamic Discovery of Services and
+Plugins`_ above for some more background on entry points.)
+
+
+Adding Commands
+---------------
+
+You can add new ``setup`` commands by defining entry points in the
+``distutils.commands`` group.  For example, if you wanted to add a ``foo``
+command, you might add something like this to your distutils extension
+project's setup script::
+
+    setup(
+        # ...
+        entry_points={
+            "distutils.commands": [
+                "foo = mypackage.some_module:foo",
+            ],
+        },
+    )
+
+(Assuming, of course, that the ``foo`` class in ``mypackage.some_module`` is
+a ``setuptools.Command`` subclass.)
+
+Once a project containing such entry points has been activated on ``sys.path``,
+(e.g. by running "install" or "develop" with a site-packages installation
+directory) the command(s) will be available to any ``setuptools``-based setup
+scripts.  It is not necessary to use the ``--command-packages`` option or
+to monkeypatch the ``distutils.command`` package to install your commands;
+``setuptools`` automatically adds a wrapper to the distutils to search for
+entry points in the active distributions on ``sys.path``.  In fact, this is
+how setuptools' own commands are installed: the setuptools project's setup
+script defines entry points for them!
+
+
+Adding ``setup()`` Arguments
+----------------------------
+
+Sometimes, your commands may need additional arguments to the ``setup()``
+call.  You can enable this by defining entry points in the
+``distutils.setup_keywords`` group.  For example, if you wanted a ``setup()``
+argument called ``bar_baz``, you might add something like this to your
+distutils extension project's setup script::
+
+    setup(
+        # ...
+        entry_points={
+            "distutils.commands": [
+                "foo = mypackage.some_module:foo",
+            ],
+            "distutils.setup_keywords": [
+                "bar_baz = mypackage.some_module:validate_bar_baz",
+            ],
+        },
+    )
+
+The idea here is that the entry point defines a function that will be called
+to validate the ``setup()`` argument, if it's supplied.  The ``Distribution``
+object will have the initial value of the attribute set to ``None``, and the
+validation function will only be called if the ``setup()`` call sets it to
+a non-None value.  Here's an example validation function::
+
+    def assert_bool(dist, attr, value):
+        """Verify that value is True, False, 0, or 1"""
+        if bool(value) != value:
+            raise DistutilsSetupError(
+                "%r must be a boolean value (got %r)" % (attr,value)
+            )
+
+Your function should accept three arguments: the ``Distribution`` object,
+the attribute name, and the attribute value.  It should raise a
+``DistutilsSetupError`` (from the ``distutils.errors`` module) if the argument
+is invalid.  Remember, your function will only be called with non-None values,
+and the default value of arguments defined this way is always None.  So, your
+commands should always be prepared for the possibility that the attribute will
+be ``None`` when they access it later.
+
+If more than one active distribution defines an entry point for the same
+``setup()`` argument, *all* of them will be called.  This allows multiple
+distutils extensions to define a common argument, as long as they agree on
+what values of that argument are valid.
+
+Also note that as with commands, it is not necessary to subclass or monkeypatch
+the distutils ``Distribution`` class in order to add your arguments; it is
+sufficient to define the entry points in your extension, as long as any setup
+script using your extension lists your project in its ``setup_requires``
+argument.
+
+
+Adding new EGG-INFO Files
+-------------------------
+
+Some extensible applications or frameworks may want to allow third parties to
+develop plugins with application or framework-specific metadata included in
+the plugins' EGG-INFO directory, for easy access via the ``pkg_resources``
+metadata API.  The easiest way to allow this is to create a distutils extension
+to be used from the plugin projects' setup scripts (via ``setup_requires``)
+that defines a new setup keyword, and then uses that data to write an EGG-INFO
+file when the ``egg_info`` command is run.
+
+The ``egg_info`` command looks for extension points in an ``egg_info.writers``
+group, and calls them to write the files.  Here's a simple example of a
+distutils extension defining a setup argument ``foo_bar``, which is a list of
+lines that will be written to ``foo_bar.txt`` in the EGG-INFO directory of any
+project that uses the argument::
+
+    setup(
+        # ...
+        entry_points={
+            "distutils.setup_keywords": [
+                "foo_bar = setuptools.dist:assert_string_list",
+            ],
+            "egg_info.writers": [
+                "foo_bar.txt = setuptools.command.egg_info:write_arg",
+            ],
+        },
+    )
+
+This simple example makes use of two utility functions defined by setuptools
+for its own use: a routine to validate that a setup keyword is a sequence of
+strings, and another one that looks up a setup argument and writes it to
+a file.  Here's what the writer utility looks like::
+
+    def write_arg(cmd, basename, filename):
+        argname = os.path.splitext(basename)[0]
+        value = getattr(cmd.distribution, argname, None)
+        if value is not None:
+            value = '\n'.join(value) + '\n'
+        cmd.write_or_delete_file(argname, filename, value)
+
+As you can see, ``egg_info.writers`` entry points must be a function taking
+three arguments: a ``egg_info`` command instance, the basename of the file to
+write (e.g. ``foo_bar.txt``), and the actual full filename that should be
+written to.
+
+In general, writer functions should honor the command object's ``dry_run``
+setting when writing files, and use the ``distutils.log`` object to do any
+console output.  The easiest way to conform to this requirement is to use
+the ``cmd`` object's ``write_file()``, ``delete_file()``, and
+``write_or_delete_file()`` methods exclusively for your file operations.  See
+those methods' docstrings for more details.
+
+
+Adding Support for Revision Control Systems
+-------------------------------------------------
+
+If the files you want to include in the source distribution are tracked using
+Git, Mercurial or SVN, you can use the following packages to achieve that:
+
+- Git and Mercurial: `setuptools_scm <https://pypi.org/project/setuptools_scm/>`_
+- SVN: `setuptools_svn <https://pypi.org/project/setuptools_svn/>`_
+
+If you would like to create a plugin for ``setuptools`` to find files tracked
+by another revision control system, you can do so by adding an entry point to
+the ``setuptools.file_finders`` group.  The entry point should be a function
+accepting a single directory name, and should yield all the filenames within
+that directory (and any subdirectories thereof) that are under revision
+control.
+
+For example, if you were going to create a plugin for a revision control system
+called "foobar", you would write a function something like this:
+
+.. code-block:: python
+
+    def find_files_for_foobar(dirname):
+        # loop to yield paths that start with `dirname`
+
+And you would register it in a setup script using something like this::
+
+    entry_points={
+        "setuptools.file_finders": [
+            "foobar = my_foobar_module:find_files_for_foobar",
+        ]
+    }
+
+Then, anyone who wants to use your plugin can simply install it, and their
+local setuptools installation will be able to find the necessary files.
+
+It is not necessary to distribute source control plugins with projects that
+simply use the other source control system, or to specify the plugins in
+``setup_requires``.  When you create a source distribution with the ``sdist``
+command, setuptools automatically records what files were found in the
+``SOURCES.txt`` file.  That way, recipients of source distributions don't need
+to have revision control at all.  However, if someone is working on a package
+by checking out with that system, they will need the same plugin(s) that the
+original author is using.
+
+A few important points for writing revision control file finders:
+
+* Your finder function MUST return relative paths, created by appending to the
+  passed-in directory name.  Absolute paths are NOT allowed, nor are relative
+  paths that reference a parent directory of the passed-in directory.
+
+* Your finder function MUST accept an empty string as the directory name,
+  meaning the current directory.  You MUST NOT convert this to a dot; just
+  yield relative paths.  So, yielding a subdirectory named ``some/dir`` under
+  the current directory should NOT be rendered as ``./some/dir`` or
+  ``/somewhere/some/dir``, but *always* as simply ``some/dir``
+
+* Your finder function SHOULD NOT raise any errors, and SHOULD deal gracefully
+  with the absence of needed programs (i.e., ones belonging to the revision
+  control system itself).  It *may*, however, use ``distutils.log.warn()`` to
+  inform the user of the missing program(s).
+
+
+Subclassing ``Command``
+-----------------------
+
+Sorry, this section isn't written yet, and neither is a lot of what's below
+this point.
+
+XXX
+
+
+Reusing ``setuptools`` Code
+===========================
+
+``ez_setup``
+------------
+
+XXX
+
+
+``setuptools.archive_util``
+---------------------------
+
+XXX
+
+
+``setuptools.sandbox``
+----------------------
+
+XXX
+
+
+``setuptools.package_index``
+----------------------------
+
+XXX
+
+
+Mailing List and Bug Tracker
+============================
+
+Please use the `distutils-sig mailing list`_ for questions and discussion about
+setuptools, and the `setuptools bug tracker`_ ONLY for issues you have
+confirmed via the list are actual bugs, and which you have reduced to a minimal
+set of steps to reproduce.
+
+.. _distutils-sig mailing list: http://mail.python.org/pipermail/distutils-sig/
+.. _setuptools bug tracker: https://github.com/pypa/setuptools/
diff --git a/easy_install.py b/easy_install.py
new file mode 100755
index 0000000..d87e984
--- /dev/null
+++ b/easy_install.py
@@ -0,0 +1,5 @@
+"""Run the EasyInstall command"""
+
+if __name__ == '__main__':
+    from setuptools.command.easy_install import main
+    main()
diff --git a/launcher.c b/launcher.c
new file mode 100755
index 0000000..be69f0c
--- /dev/null
+++ b/launcher.c
@@ -0,0 +1,335 @@
+/*  Setuptools Script Launcher for Windows
+
+    This is a stub executable for Windows that functions somewhat like
+    Effbot's "exemaker", in that it runs a script with the same name but
+    a .py extension, using information from a #! line.  It differs in that
+    it spawns the actual Python executable, rather than attempting to
+    hook into the Python DLL.  This means that the script will run with
+    sys.executable set to the Python executable, where exemaker ends up with
+    sys.executable pointing to itself.  (Which means it won't work if you try
+    to run another Python process using sys.executable.)
+
+    To build/rebuild with mingw32, do this in the setuptools project directory:
+
+       gcc -DGUI=0           -mno-cygwin -O -s -o setuptools/cli.exe launcher.c
+       gcc -DGUI=1 -mwindows -mno-cygwin -O -s -o setuptools/gui.exe launcher.c
+
+    To build for Windows RT, install both Visual Studio Express for Windows 8
+    and for Windows Desktop (both freeware), create "win32" application using
+    "Windows Desktop" version, create new "ARM" target via
+    "Configuration Manager" menu and modify ".vcxproj" file by adding
+    "<WindowsSDKDesktopARMSupport>true</WindowsSDKDesktopARMSupport>" tag
+    as child of "PropertyGroup" tags that has "Debug|ARM" and "Release|ARM"
+    properties.
+
+    It links to msvcrt.dll, but this shouldn't be a problem since it doesn't
+    actually run Python in the same process.  Note that using 'exec' instead
+    of 'spawn' doesn't work, because on Windows this leads to the Python
+    executable running in the *background*, attached to the same console
+    window, meaning you get a command prompt back *before* Python even finishes
+    starting.  So, we have to use spawnv() and wait for Python to exit before
+    continuing.  :(
+*/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <windows.h>
+#include <tchar.h>
+#include <fcntl.h>
+
+int child_pid=0;
+
+int fail(char *format, char *data) {
+    /* Print error message to stderr and return 2 */
+    fprintf(stderr, format, data);
+    return 2;
+}
+
+char *quoted(char *data) {
+    int i, ln = strlen(data), nb;
+
+    /* We allocate twice as much space as needed to deal with worst-case
+       of having to escape everything. */
+    char *result = calloc(ln*2+3, sizeof(char));
+    char *presult = result;
+
+    *presult++ = '"';
+    for (nb=0, i=0; i < ln; i++)
+      {
+        if (data[i] == '\\')
+          nb += 1;
+        else if (data[i] == '"')
+          {
+            for (; nb > 0; nb--)
+              *presult++ = '\\';
+            *presult++ = '\\';
+          }
+        else
+          nb = 0;
+        *presult++ = data[i];
+      }
+
+    for (; nb > 0; nb--)        /* Deal w trailing slashes */
+      *presult++ = '\\';
+
+    *presult++ = '"';
+    *presult++ = 0;
+    return result;
+}
+
+
+
+
+
+
+
+
+
+
+char *loadable_exe(char *exename) {
+    /* HINSTANCE hPython;  DLL handle for python executable */
+    char *result;
+
+    /* hPython = LoadLibraryEx(exename, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
+    if (!hPython) return NULL; */
+
+    /* Return the absolute filename for spawnv */
+    result = calloc(MAX_PATH, sizeof(char));
+    strncpy(result, exename, MAX_PATH);
+    /*if (result) GetModuleFileNameA(hPython, result, MAX_PATH);
+
+    FreeLibrary(hPython); */
+    return result;
+}
+
+
+char *find_exe(char *exename, char *script) {
+    char drive[_MAX_DRIVE], dir[_MAX_DIR], fname[_MAX_FNAME], ext[_MAX_EXT];
+    char path[_MAX_PATH], c, *result;
+
+    /* convert slashes to backslashes for uniform search below */
+    result = exename;
+    while (c = *result++) if (c=='/') result[-1] = '\\';
+
+    _splitpath(exename, drive, dir, fname, ext);
+    if (drive[0] || dir[0]=='\\') {
+        return loadable_exe(exename);   /* absolute path, use directly */
+    }
+    /* Use the script's parent directory, which should be the Python home
+       (This should only be used for bdist_wininst-installed scripts, because
+        easy_install-ed scripts use the absolute path to python[w].exe
+    */
+    _splitpath(script, drive, dir, fname, ext);
+    result = dir + strlen(dir) -1;
+    if (*result == '\\') result--;
+    while (*result != '\\' && result>=dir) *result-- = 0;
+    _makepath(path, drive, dir, exename, NULL);
+    return loadable_exe(path);
+}
+
+
+char **parse_argv(char *cmdline, int *argc)
+{
+    /* Parse a command line in-place using MS C rules */
+
+    char **result = calloc(strlen(cmdline), sizeof(char *));
+    char *output = cmdline;
+    char c;
+    int nb = 0;
+    int iq = 0;
+    *argc = 0;
+
+    result[0] = output;
+    while (isspace(*cmdline)) cmdline++;   /* skip leading spaces */
+
+    do {
+        c = *cmdline++;
+        if (!c || (isspace(c) && !iq)) {
+            while (nb) {*output++ = '\\'; nb--; }
+            *output++ = 0;
+            result[++*argc] = output;
+            if (!c) return result;
+            while (isspace(*cmdline)) cmdline++;  /* skip leading spaces */
+            if (!*cmdline) return result;  /* avoid empty arg if trailing ws */
+            continue;
+        }
+        if (c == '\\')
+            ++nb;   /* count \'s */
+        else {
+            if (c == '"') {
+                if (!(nb & 1)) { iq = !iq; c = 0; }  /* skip " unless odd # of \ */
+                nb = nb >> 1;   /* cut \'s in half */
+            }
+            while (nb) {*output++ = '\\'; nb--; }
+            if (c) *output++ = c;
+        }
+    } while (1);
+}
+
+void pass_control_to_child(DWORD control_type) {
+    /*
+     * distribute-issue207
+     * passes the control event to child process (Python)
+     */
+    if (!child_pid) {
+        return;
+    }
+    GenerateConsoleCtrlEvent(child_pid,0);
+}
+
+BOOL control_handler(DWORD control_type) {
+    /* 
+     * distribute-issue207
+     * control event handler callback function
+     */
+    switch (control_type) {
+        case CTRL_C_EVENT:
+            pass_control_to_child(0);
+            break;
+    }
+    return TRUE;
+}
+
+int create_and_wait_for_subprocess(char* command) {
+    /*
+     * distribute-issue207
+     * launches child process (Python)
+     */
+    DWORD return_value = 0;
+    LPSTR commandline = command;
+    STARTUPINFOA s_info;
+    PROCESS_INFORMATION p_info;
+    ZeroMemory(&p_info, sizeof(p_info));
+    ZeroMemory(&s_info, sizeof(s_info));
+    s_info.cb = sizeof(STARTUPINFO);
+    // set up the control handler callback function
+    SetConsoleCtrlHandler((PHANDLER_ROUTINE) control_handler, TRUE);
+    if (!CreateProcessA(NULL, commandline, NULL, NULL, TRUE, 0, NULL, NULL, &s_info, &p_info)) {
+        fprintf(stderr, "failed to create process.\n");
+        return 0;
+    }   
+    child_pid = p_info.dwProcessId;
+    // wait for Python to exit
+    WaitForSingleObject(p_info.hProcess, INFINITE);
+    if (!GetExitCodeProcess(p_info.hProcess, &return_value)) {
+        fprintf(stderr, "failed to get exit code from process.\n");
+        return 0;
+    }
+    return return_value;
+}
+
+char* join_executable_and_args(char *executable, char **args, int argc)
+{
+    /*
+     * distribute-issue207
+     * CreateProcess needs a long string of the executable and command-line arguments,
+     * so we need to convert it from the args that were built
+     */
+    int len,counter;
+    char* cmdline;
+    
+    len=strlen(executable)+2;
+    for (counter=1; counter<argc; counter++) {
+        len+=strlen(args[counter])+1;
+    }
+
+    cmdline = (char*)calloc(len, sizeof(char));
+    sprintf(cmdline, "%s", executable);
+    len=strlen(executable);
+    for (counter=1; counter<argc; counter++) {
+        sprintf(cmdline+len, " %s", args[counter]);
+        len+=strlen(args[counter])+1;
+    }
+    return cmdline;
+}
+
+int run(int argc, char **argv, int is_gui) {
+
+    char python[256];   /* python executable's filename*/
+    char *pyopt;        /* Python option */
+    char script[256];   /* the script's filename */
+
+    int scriptf;        /* file descriptor for script file */
+
+    char **newargs, **newargsp, **parsedargs; /* argument array for exec */
+    char *ptr, *end;    /* working pointers for string manipulation */
+    char *cmdline;
+    int i, parsedargc;              /* loop counter */
+
+    /* compute script name from our .exe name*/
+    GetModuleFileNameA(NULL, script, sizeof(script));
+    end = script + strlen(script);
+    while( end>script && *end != '.')
+        *end-- = '\0';
+    *end-- = '\0';
+    strcat(script, (GUI ? "-script.pyw" : "-script.py"));
+
+    /* figure out the target python executable */
+
+    scriptf = open(script, O_RDONLY);
+    if (scriptf == -1) {
+        return fail("Cannot open %s\n", script);
+    }
+    end = python + read(scriptf, python, sizeof(python));
+    close(scriptf);
+
+    ptr = python-1;
+    while(++ptr < end && *ptr && *ptr!='\n' && *ptr!='\r') {;}
+
+    *ptr-- = '\0';
+
+    if (strncmp(python, "#!", 2)) {
+        /* default to python.exe if no #! header */
+        strcpy(python, "#!python.exe");
+    }
+
+    parsedargs = parse_argv(python+2, &parsedargc);
+
+    /* Using spawnv() can fail strangely if you e.g. find the Cygwin
+       Python, so we'll make sure Windows can find and load it */
+
+    ptr = find_exe(parsedargs[0], script);
+    if (!ptr) {
+        return fail("Cannot find Python executable %s\n", parsedargs[0]);
+    }
+
+    /* printf("Python executable: %s\n", ptr); */
+
+    /* Argument array needs to be
+       parsedargc + argc, plus 1 for null sentinel */
+
+    newargs = (char **)calloc(parsedargc + argc + 1, sizeof(char *));
+    newargsp = newargs;
+
+    *newargsp++ = quoted(ptr);
+    for (i = 1; i<parsedargc; i++) *newargsp++ = quoted(parsedargs[i]);
+
+    *newargsp++ = quoted(script);
+    for (i = 1; i < argc; i++)     *newargsp++ = quoted(argv[i]);
+
+    *newargsp++ = NULL;
+
+    /* printf("args 0: %s\nargs 1: %s\n", newargs[0], newargs[1]); */
+
+    if (is_gui) {
+        /* Use exec, we don't need to wait for the GUI to finish */
+        execv(ptr, (const char * const *)(newargs));
+        return fail("Could not exec %s", ptr);   /* shouldn't get here! */
+    }
+
+    /*
+     * distribute-issue207: using CreateProcessA instead of spawnv
+     */
+    cmdline = join_executable_and_args(ptr, newargs, parsedargc + argc);
+    return create_and_wait_for_subprocess(cmdline);
+}
+
+int WINAPI WinMain(HINSTANCE hI, HINSTANCE hP, LPSTR lpCmd, int nShow) {
+    return run(__argc, __argv, GUI);
+}
+
+int main(int argc, char** argv) {
+    return run(argc, argv, GUI);
+}
+
diff --git a/msvc-build-launcher.cmd b/msvc-build-launcher.cmd
new file mode 100644
index 0000000..92da290
--- /dev/null
+++ b/msvc-build-launcher.cmd
@@ -0,0 +1,39 @@
+@echo off

+

+REM Use old Windows SDK 6.1 so created .exe will be compatible with

+REM old Windows versions.

+REM Windows SDK 6.1 may be downloaded at:

+REM  http://www.microsoft.com/en-us/download/details.aspx?id=11310

+set PATH_OLD=%PATH%

+

+REM The SDK creates a false install of Visual Studio at one of these locations

+set PATH=C:\Program Files\Microsoft Visual Studio 9.0\VC\bin;%PATH%

+set PATH=C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\bin;%PATH%

+

+REM set up the environment to compile to x86

+call VCVARS32

+if "%ERRORLEVEL%"=="0" (

+  cl /D "GUI=0" /D "WIN32_LEAN_AND_MEAN" launcher.c /O2 /link /MACHINE:x86 /SUBSYSTEM:CONSOLE /out:setuptools/cli-32.exe

+  cl /D "GUI=1" /D "WIN32_LEAN_AND_MEAN" launcher.c /O2 /link /MACHINE:x86 /SUBSYSTEM:WINDOWS /out:setuptools/gui-32.exe

+) else (

+  echo Windows SDK 6.1 not found to build Windows 32-bit version

+)

+

+REM buildout (and possibly other implementations) currently depend on

+REM the 32-bit launcher scripts without the -32 in the filename, so copy them

+REM there for now.

+copy setuptools/cli-32.exe setuptools/cli.exe

+copy setuptools/gui-32.exe setuptools/gui.exe

+

+REM now for 64-bit

+REM Use the x86_amd64 profile, which is the 32-bit cross compiler for amd64

+call VCVARSx86_amd64

+if "%ERRORLEVEL%"=="0" (

+  cl /D "GUI=0" /D "WIN32_LEAN_AND_MEAN" launcher.c /O2 /link /MACHINE:x64 /SUBSYSTEM:CONSOLE /out:setuptools/cli-64.exe

+  cl /D "GUI=1" /D "WIN32_LEAN_AND_MEAN" launcher.c /O2 /link /MACHINE:x64 /SUBSYSTEM:WINDOWS /out:setuptools/gui-64.exe

+) else (

+  echo Windows SDK 6.1 not found to build Windows 64-bit version

+)

+

+set PATH=%PATH_OLD%

+

diff --git a/pavement.py b/pavement.py
new file mode 100644
index 0000000..84e5825
--- /dev/null
+++ b/pavement.py
@@ -0,0 +1,62 @@
+import re
+
+from paver.easy import task, path as Path
+import pip
+
+
+def remove_all(paths):
+    for path in paths:
+        path.rmtree() if path.isdir() else path.remove()
+
+
+@task
+def update_vendored():
+    update_pkg_resources()
+    update_setuptools()
+
+
+def rewrite_packaging(pkg_files, new_root):
+    """
+    Rewrite imports in packaging to redirect to vendored copies.
+    """
+    for file in pkg_files.glob('*.py'):
+        text = file.text()
+        text = re.sub(r' (pyparsing|six)', rf' {new_root}.\1', text)
+        file.write_text(text)
+
+
+def clean(vendor):
+    """
+    Remove all files out of the vendor directory except the meta
+    data (as pip uninstall doesn't support -t).
+    """
+    remove_all(
+        path
+        for path in vendor.glob('*')
+        if path.basename() != 'vendored.txt'
+    )
+
+
+def install(vendor):
+    clean(vendor)
+    install_args = [
+        'install',
+        '-r', str(vendor / 'vendored.txt'),
+        '-t', str(vendor),
+    ]
+    pip.main(install_args)
+    remove_all(vendor.glob('*.dist-info'))
+    remove_all(vendor.glob('*.egg-info'))
+    (vendor / '__init__.py').write_text('')
+
+
+def update_pkg_resources():
+    vendor = Path('pkg_resources/_vendor')
+    install(vendor)
+    rewrite_packaging(vendor / 'packaging', 'pkg_resources.extern')
+
+
+def update_setuptools():
+    vendor = Path('setuptools/_vendor')
+    install(vendor)
+    rewrite_packaging(vendor / 'packaging', 'setuptools.extern')
diff --git a/pkg_resources/__init__.py b/pkg_resources/__init__.py
new file mode 100644
index 0000000..d5b0fe9
--- /dev/null
+++ b/pkg_resources/__init__.py
@@ -0,0 +1,3123 @@
+# coding: utf-8
+"""
+Package resource API
+--------------------
+
+A resource is a logical file contained within a package, or a logical
+subdirectory thereof.  The package resource API expects resource names
+to have their path parts separated with ``/``, *not* whatever the local
+path separator is.  Do not use os.path operations to manipulate resource
+names being passed into the API.
+
+The package resource API is designed to work with normal filesystem packages,
+.egg files, and unpacked .egg files.  It can also work in a limited way with
+.zip files and with custom PEP 302 loaders that support the ``get_data()``
+method.
+"""
+
+from __future__ import absolute_import
+
+import sys
+import os
+import io
+import time
+import re
+import types
+import zipfile
+import zipimport
+import warnings
+import stat
+import functools
+import pkgutil
+import operator
+import platform
+import collections
+import plistlib
+import email.parser
+import errno
+import tempfile
+import textwrap
+import itertools
+import inspect
+from pkgutil import get_importer
+
+try:
+    import _imp
+except ImportError:
+    # Python 3.2 compatibility
+    import imp as _imp
+
+from pkg_resources.extern import six
+from pkg_resources.extern.six.moves import urllib, map, filter
+
+# capture these to bypass sandboxing
+from os import utime
+try:
+    from os import mkdir, rename, unlink
+    WRITE_SUPPORT = True
+except ImportError:
+    # no write support, probably under GAE
+    WRITE_SUPPORT = False
+
+from os import open as os_open
+from os.path import isdir, split
+
+try:
+    import importlib.machinery as importlib_machinery
+    # access attribute to force import under delayed import mechanisms.
+    importlib_machinery.__name__
+except ImportError:
+    importlib_machinery = None
+
+from . import py31compat
+from pkg_resources.extern import appdirs
+from pkg_resources.extern import packaging
+__import__('pkg_resources.extern.packaging.version')
+__import__('pkg_resources.extern.packaging.specifiers')
+__import__('pkg_resources.extern.packaging.requirements')
+__import__('pkg_resources.extern.packaging.markers')
+
+
+if (3, 0) < sys.version_info < (3, 3):
+    raise RuntimeError("Python 3.3 or later is required")
+
+if six.PY2:
+    # Those builtin exceptions are only defined in Python 3
+    PermissionError = None
+    NotADirectoryError = None
+
+# declare some globals that will be defined later to
+# satisfy the linters.
+require = None
+working_set = None
+add_activation_listener = None
+resources_stream = None
+cleanup_resources = None
+resource_dir = None
+resource_stream = None
+set_extraction_path = None
+resource_isdir = None
+resource_string = None
+iter_entry_points = None
+resource_listdir = None
+resource_filename = None
+resource_exists = None
+_distribution_finders = None
+_namespace_handlers = None
+_namespace_packages = None
+
+
+class PEP440Warning(RuntimeWarning):
+    """
+    Used when there is an issue with a version or specifier not complying with
+    PEP 440.
+    """
+
+
+def parse_version(v):
+    try:
+        return packaging.version.Version(v)
+    except packaging.version.InvalidVersion:
+        return packaging.version.LegacyVersion(v)
+
+
+_state_vars = {}
+
+
+def _declare_state(vartype, **kw):
+    globals().update(kw)
+    _state_vars.update(dict.fromkeys(kw, vartype))
+
+
+def __getstate__():
+    state = {}
+    g = globals()
+    for k, v in _state_vars.items():
+        state[k] = g['_sget_' + v](g[k])
+    return state
+
+
+def __setstate__(state):
+    g = globals()
+    for k, v in state.items():
+        g['_sset_' + _state_vars[k]](k, g[k], v)
+    return state
+
+
+def _sget_dict(val):
+    return val.copy()
+
+
+def _sset_dict(key, ob, state):
+    ob.clear()
+    ob.update(state)
+
+
+def _sget_object(val):
+    return val.__getstate__()
+
+
+def _sset_object(key, ob, state):
+    ob.__setstate__(state)
+
+
+_sget_none = _sset_none = lambda *args: None
+
+
+def get_supported_platform():
+    """Return this platform's maximum compatible version.
+
+    distutils.util.get_platform() normally reports the minimum version
+    of Mac OS X that would be required to *use* extensions produced by
+    distutils.  But what we want when checking compatibility is to know the
+    version of Mac OS X that we are *running*.  To allow usage of packages that
+    explicitly require a newer version of Mac OS X, we must also know the
+    current version of the OS.
+
+    If this condition occurs for any other platform with a version in its
+    platform strings, this function should be extended accordingly.
+    """
+    plat = get_build_platform()
+    m = macosVersionString.match(plat)
+    if m is not None and sys.platform == "darwin":
+        try:
+            plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
+        except ValueError:
+            # not Mac OS X
+            pass
+    return plat
+
+
+__all__ = [
+    # Basic resource access and distribution/entry point discovery
+    'require', 'run_script', 'get_provider', 'get_distribution',
+    'load_entry_point', 'get_entry_map', 'get_entry_info',
+    'iter_entry_points',
+    'resource_string', 'resource_stream', 'resource_filename',
+    'resource_listdir', 'resource_exists', 'resource_isdir',
+
+    # Environmental control
+    'declare_namespace', 'working_set', 'add_activation_listener',
+    'find_distributions', 'set_extraction_path', 'cleanup_resources',
+    'get_default_cache',
+
+    # Primary implementation classes
+    'Environment', 'WorkingSet', 'ResourceManager',
+    'Distribution', 'Requirement', 'EntryPoint',
+
+    # Exceptions
+    'ResolutionError', 'VersionConflict', 'DistributionNotFound',
+    'UnknownExtra', 'ExtractionError',
+
+    # Warnings
+    'PEP440Warning',
+
+    # Parsing functions and string utilities
+    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
+    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
+    'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
+
+    # filesystem utilities
+    'ensure_directory', 'normalize_path',
+
+    # Distribution "precedence" constants
+    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
+
+    # "Provider" interfaces, implementations, and registration/lookup APIs
+    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
+    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
+    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
+    'register_finder', 'register_namespace_handler', 'register_loader_type',
+    'fixup_namespace_packages', 'get_importer',
+
+    # Deprecated/backward compatibility only
+    'run_main', 'AvailableDistributions',
+]
+
+
+class ResolutionError(Exception):
+    """Abstract base for dependency resolution errors"""
+
+    def __repr__(self):
+        return self.__class__.__name__ + repr(self.args)
+
+
class VersionConflict(ResolutionError):
    """
    An already-installed version conflicts with the requested version.

    Should be initialized with the installed Distribution and the requested
    Requirement.
    """

    _template = "{self.dist} is installed but {self.req} is required"

    @property
    def dist(self):
        # The installed Distribution (first positional argument).
        return self.args[0]

    @property
    def req(self):
        # The requested Requirement (second positional argument).
        return self.args[1]

    def report(self):
        """Return a human-readable description of the conflict."""
        return self._template.format(self=self)

    def with_context(self, required_by):
        """
        If required_by is non-empty, return a version of self that is a
        ContextualVersionConflict.
        """
        if not required_by:
            return self
        return ContextualVersionConflict(*(self.args + (required_by,)))
+
+
class ContextualVersionConflict(VersionConflict):
    """
    A VersionConflict that accepts a third parameter, the set of the
    requirements that required the installed Distribution.
    """

    # Extend the parent's message with the set of requirers.
    _template = VersionConflict._template + ' by {self.required_by}'

    @property
    def required_by(self):
        # The set of project names that pulled in the conflicting
        # distribution (third positional argument; see
        # VersionConflict.with_context).
        return self.args[2]
+
+
class DistributionNotFound(ResolutionError):
    """A requested distribution was not found"""

    _template = ("The '{self.req}' distribution was not found "
                 "and is required by {self.requirers_str}")

    @property
    def req(self):
        # The unsatisfied Requirement (first positional argument).
        return self.args[0]

    @property
    def requirers(self):
        # Collection of requirer names, or None (second positional argument).
        return self.args[1]

    @property
    def requirers_str(self):
        requirers = self.requirers
        return ', '.join(requirers) if requirers else 'the application'

    def report(self):
        """Return a human-readable description of the failure."""
        return self._template.format(self=self)

    def __str__(self):
        return self.report()
+
+
class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
    # NOTE(review): presumably raised when a requirement names an extra the
    # distribution does not declare — confirm against Distribution.requires().
+
+
+_provider_factories = {}
+
+PY_MAJOR = sys.version[:3]
+EGG_DIST = 3
+BINARY_DIST = 2
+SOURCE_DIST = 1
+CHECKOUT_DIST = 0
+DEVELOP_DIST = -1
+
+
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`

    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
    """
    # Later registrations for the same loader type overwrite earlier ones.
    _provider_factories[loader_type] = provider_factory
+
+
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        # A Requirement resolves to its active distribution, activating
        # one via require() if none is active yet.
        dist = working_set.find(moduleOrReq)
        return dist or require(str(moduleOrReq))[0]
    if moduleOrReq not in sys.modules:
        __import__(moduleOrReq)
    module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    factory = _find_adapter(_provider_factories, loader)
    return factory(module)
+
+
+def _macosx_vers(_cache=[]):
+    if not _cache:
+        version = platform.mac_ver()[0]
+        # fallback for MacPorts
+        if version == '':
+            plist = '/System/Library/CoreServices/SystemVersion.plist'
+            if os.path.exists(plist):
+                if hasattr(plistlib, 'readPlist'):
+                    plist_content = plistlib.readPlist(plist)
+                    if 'ProductVersion' in plist_content:
+                        version = plist_content['ProductVersion']
+
+        _cache.append(version.split('.'))
+    return _cache[0]
+
+
+def _macosx_arch(machine):
+    return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
+
+
def get_build_platform():
    """Return this platform's string for platform-specific distributions

    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    try:
        # Python 2.7 or >=3.2
        from sysconfig import get_platform
    except ImportError:
        from distutils.util import get_platform

    plat = get_platform()
    if sys.platform != "darwin" or plat.startswith('macosx-'):
        return plat
    # Older tooling on Mac may report a non-macosx platform string;
    # rebuild a canonical "macosx-X.Y-arch" form from the OS version.
    try:
        version = _macosx_vers()
        machine = os.uname()[4].replace(" ", "_")
        arch = _macosx_arch(machine)
        return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]), arch)
    except ValueError:
        # if someone is running a non-Mac darwin system, this will fall
        # through to the default implementation
        pass
    return plat
+
+
# Patterns for platform strings like "macosx-10.6-intel" and the legacy
# pre-setuptools-0.6 form "darwin-8.11.1-i386"; both are consumed by
# compatible_platforms() below.
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat: old public name for get_build_platform
get_platform = get_build_platform
+
+
def compatible_platforms(provided, required):
    """Can code for the `provided` platform run on the `required` platform?

    Returns true if either platform is ``None``, or the platforms are equal.

    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    # Trivially compatible: either side unspecified, or an exact match.
    if provided is None or required is None or provided == required:
        return True

    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if not reqMac:
        # XXX Linux and other platforms' special cases should go here
        return False

    provMac = macosVersionString.match(provided)
    if not provMac:
        # Backwards compatibility for packages built before setuptools 0.6,
        # which used a "darwin-..." designation instead of "macosx-...".
        provDarwin = darwinVersionString.match(provided)
        if provDarwin:
            dversion = int(provDarwin.group(1))
            macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
            if (dversion == 7 and macosversion >= "10.3"
                    or dversion == 8 and macosversion >= "10.4"):
                return True
        # egg isn't macosx or legacy darwin
        return False

    # Compatible when the major version and machine type match and the
    # required OS minor version is >= the provided one.
    return (
        provMac.group(1) == reqMac.group(1)
        and provMac.group(3) == reqMac.group(3)
        and int(provMac.group(2)) <= int(reqMac.group(2))
    )
+
+
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # Run the script inside the *caller's* globals, scrubbed of everything
    # except __name__ so it behaves like a fresh __main__ module.
    caller_globals = sys._getframe(1).f_globals
    preserved_name = caller_globals['__name__']
    caller_globals.clear()
    caller_globals['__name__'] = preserved_name
    dist = require(dist_spec)[0]
    dist.run_script(script_name, caller_globals)
+
+
# backward compatibility: old public name for run_script
run_main = run_script
+
+
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    # Normalize step-by-step: string -> Requirement -> Distribution.
    if isinstance(dist, six.string_types):
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        dist = get_provider(dist)
    if isinstance(dist, Distribution):
        return dist
    raise TypeError("Expected string, Requirement, or Distribution", dist)
+
+
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    distribution = get_distribution(dist)
    return distribution.load_entry_point(group, name)
+
+
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    distribution = get_distribution(dist)
    return distribution.get_entry_map(group)
+
+
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    distribution = get_distribution(dist)
    return distribution.get_entry_info(group, name)
+
+
class IMetadataProvider:
    """Interface for accessing a distribution's metadata files.

    This is a specification-style class: the methods deliberately omit
    ``self`` and have empty (docstring-only) bodies.
    """

    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""

    def get_metadata(name):
        """The named metadata resource as a string"""

    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines

        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""

    def metadata_isdir(name):
        """Is the named metadata a directory?  (like ``os.path.isdir()``)"""

    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""

    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
+
+
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources

    These methods mirror the ``resource_*`` API exposed by
    ``ResourceManager``, which delegates to an ``IResourceProvider``.
    """

    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`

        `manager` must be an ``IResourceManager``"""

    def has_resource(resource_name):
        """Does the package contain the named resource?"""

    def resource_isdir(resource_name):
        """Is the named resource a directory?  (like ``os.path.isdir()``)"""

    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
+
+
class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""

    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        # Ordered path entries; may contain duplicates, mirroring sys.path.
        self.entries = []
        # path entry -> list of distribution keys found on that entry.
        self.entry_keys = {}
        # distribution key -> the active Distribution for that project.
        self.by_key = {}
        # Callbacks registered via subscribe(); each is invoked for every
        # distribution added to the set.
        self.callbacks = []

        if entries is None:
            entries = sys.path

        for entry in entries:
            self.add_entry(entry)

    @classmethod
    def _build_master(cls):
        """
        Prepare the master working set.
        """
        ws = cls()
        try:
            from __main__ import __requires__
        except ImportError:
            # The main program does not list any requirements
            return ws

        # ensure the requirements are met
        try:
            ws.require(__requires__)
        except VersionConflict:
            # sys.path as-is cannot satisfy __requires__; rebuild the
            # working set (and sys.path) from the requirements alone.
            return cls._build_from_requirements(__requires__)

        return ws

    @classmethod
    def _build_from_requirements(cls, req_spec):
        """
        Build a working set from a requirement spec. Rewrites sys.path.
        """
        # try it without defaults already on sys.path
        # by starting with an empty path
        ws = cls([])
        reqs = parse_requirements(req_spec)
        dists = ws.resolve(reqs, Environment())
        for dist in dists:
            ws.add(dist)

        # add any missing entries from sys.path
        for entry in sys.path:
            if entry not in ws.entries:
                ws.add_entry(entry)

        # then copy back to sys.path
        sys.path[:] = ws.entries
        return ws

    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it

        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added.  `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)

    def __contains__(self, dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist

    def find(self, req):
        """Find a distribution matching requirement `req`

        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`.  But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            # XXX add more info
            raise VersionConflict(dist, req)
        return dist

    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`

        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]

    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # The script runs in the *caller's* globals, scrubbed of everything
        # but __name__ so it behaves like a fresh __main__ module.
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)

    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set

        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue

            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key] = 1
                    yield self.by_key[key]

    def add(self, dist, entry=None, insert=True, replace=False):
        """Add `dist` to working set, associated with `entry`

        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).

        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set, unless `replace=True`.
        If it's added, any callbacks registered with the ``subscribe()`` method
        will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry, replace=replace)

        if entry is None:
            entry = dist.location
        # Track the dist's key under both the requested entry and the
        # dist's own location.
        keys = self.entry_keys.setdefault(entry, [])
        keys2 = self.entry_keys.setdefault(dist.location, [])
        if not replace and dist.key in self.by_key:
            # ignore hidden distros
            return

        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        # Notify subscribers of the newly activated distribution.
        self._added_new(dist)

    def resolve(self, requirements, env=None, installer=None,
                replace_conflicting=False, extras=None):
        """List all distributions needed to (recursively) meet `requirements`

        `requirements` must be a sequence of ``Requirement`` objects.  `env`,
        if supplied, should be an ``Environment`` instance.  If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set.  `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.

        Unless `replace_conflicting=True`, raises a VersionConflict exception
        if any requirements are found on the path that have the correct name
        but the wrong version.  Otherwise, if an `installer` is supplied it
        will be invoked to obtain the correct version of the requirement and
        activate it.

        `extras` is a list of the extras to be used with these requirements.
        This is important because extra requirements may look like `my_req;
        extra = "my_extra"`, which would otherwise be interpreted as a purely
        optional requirement.  Instead, we want to be able to assert that these
        requirements are truly required.
        """

        # set up the stack
        requirements = list(requirements)[::-1]
        # set of processed requirements
        processed = {}
        # key -> dist
        best = {}
        to_activate = []

        req_extras = _ReqExtras()

        # Mapping of requirement to set of distributions that required it;
        # useful for reporting info about conflicts.
        required_by = collections.defaultdict(set)

        while requirements:
            # process dependencies breadth-first
            req = requirements.pop(0)
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue

            if not req_extras.markers_pass(req, extras):
                continue

            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None or (dist not in req and replace_conflicting):
                    ws = self
                    if env is None:
                        if dist is None:
                            env = Environment(self.entries)
                        else:
                            # Use an empty environment and workingset to avoid
                            # any further conflicts with the conflicting
                            # distribution
                            env = Environment([])
                            ws = WorkingSet([])
                    dist = best[req.key] = env.best_match(
                        req, ws, installer,
                        replace_conflicting=replace_conflicting
                    )
                    if dist is None:
                        requirers = required_by.get(req, None)
                        raise DistributionNotFound(req, requirers)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                dependent_req = required_by[req]
                raise VersionConflict(dist, req).with_context(dependent_req)

            # push the new requirements onto the stack
            new_requirements = dist.requires(req.extras)[::-1]
            requirements.extend(new_requirements)

            # Register the new requirements needed by req
            for new_requirement in new_requirements:
                required_by[new_requirement].add(req.project_name)
                req_extras[new_requirement] = req.extras

            processed[req] = True

        # return list of distros to activate
        return to_activate

    def find_plugins(
            self, plugin_env, full_env=None, installer=None, fallback=True):
        """Find all activatable distributions in `plugin_env`

        Example usage::

            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            # add plugins+libs to sys.path
            map(working_set.add, distributions)
            # display errors
            print('Could not load', errors)

        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        that contains all currently-available distributions.  If `full_env` is
        not supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.

        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.

        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies.  `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """

        plugin_projects = list(plugin_env)
        # scan project names in alphabetic order
        plugin_projects.sort()

        error_info = {}
        distributions = {}

        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env

        # Resolve against a *shadow* copy of this working set so that a
        # failed plugin does not pollute the real one.
        shadow_set = self.__class__([])
        # put all our entries in shadow_set
        list(map(shadow_set.add, self))

        for project_name in plugin_projects:

            for dist in plugin_env[project_name]:

                req = [dist.as_requirement()]

                try:
                    resolvees = shadow_set.resolve(req, env, installer)

                except ResolutionError as v:
                    # save error info
                    error_info[dist] = v
                    if fallback:
                        # try the next older version of project
                        continue
                    else:
                        # give up on this project, keep going
                        break

                else:
                    list(map(shadow_set.add, resolvees))
                    # dict used as an ordered set of resolved distributions
                    distributions.update(dict.fromkeys(resolvees))

                    # success, no need to try any more versions of this project
                    break

        distributions = list(distributions)
        distributions.sort()

        return distributions, error_info

    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated

        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required.  The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))

        for dist in needed:
            self.add(dist)

        return needed

    def subscribe(self, callback, existing=True):
        """Invoke `callback` for all distributions

        If `existing=True` (default),
        call on all existing ones, as well.
        """
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        if not existing:
            return
        for dist in self:
            callback(dist)

    def _added_new(self, dist):
        # Fan a newly added distribution out to every subscriber.
        for callback in self.callbacks:
            callback(dist)

    def __getstate__(self):
        # Pickle/copy support: snapshot all mutable state.
        return (
            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:]
        )

    def __setstate__(self, e_k_b_c):
        entries, keys, by_key, callbacks = e_k_b_c
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]
+
+
+class _ReqExtras(dict):
+    """
+    Map each requirement to the extras that demanded it.
+    """
+
+    def markers_pass(self, req, extras=None):
+        """
+        Evaluate markers for req against each extra that
+        demanded it.
+
+        Return False if the req has a marker and fails
+        evaluation. Otherwise, return True.
+        """
+        extra_evals = (
+            req.marker.evaluate({'extra': extra})
+            for extra in self.get(req, ()) + (extras or (None,))
+        )
+        return not req.marker or any(extra_evals)
+
+
class Environment(object):
    """Searchable snapshot of distributions on a search path"""

    def __init__(
            self, search_path=None, platform=get_supported_platform(),
            python=PY_MAJOR):
        """Snapshot distributions available on a search path

        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.

        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with.  If
        unspecified, it defaults to the current platform.  `python` is an
        optional string naming the desired version of Python (e.g. ``'3.3'``);
        it defaults to the current version.

        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.

        Note: the `platform` and `python` defaults are evaluated once, when
        this class is defined, not per call.
        """
        # project key -> list of Distributions, kept newest-first (see add()).
        self._distmap = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)

    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?

        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        # A dist with no recorded py_version is treated as compatible.
        py_compat = (
            self.python is None
            or dist.py_version is None
            or dist.py_version == self.python
        )
        return py_compat and compatible_platforms(dist.platform, self.platform)

    def remove(self, dist):
        """Remove `dist` from the environment"""
        # Raises KeyError/ValueError if the dist was never added.
        self._distmap[dist.key].remove(dist)

    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment

        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.  Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path

        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)

    def __getitem__(self, project_name):
        """Return a newest-to-oldest list of distributions for `project_name`

        Uses case-insensitive `project_name` comparison, assuming all the
        project's distributions use their project's name converted to all
        lowercase as their key.

        """
        distribution_key = project_name.lower()
        return self._distmap.get(distribution_key, [])

    def add(self, dist):
        """Add `dist` if we ``can_add()`` it and it has not already been added
        """
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key, [])
            if dist not in dists:
                dists.append(dist)
                # keep each project's list sorted newest-first
                dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)

    def best_match(
            self, req, working_set, installer=None, replace_conflicting=False):
        """Find distribution best matching `req` and usable on `working_set`

        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active.  (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.)  If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`.  If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        try:
            dist = working_set.find(req)
        except VersionConflict:
            if not replace_conflicting:
                raise
            dist = None
        if dist is not None:
            return dist
        # self[req.key] is newest-first, so the first match wins.
        for dist in self[req.key]:
            if dist in req:
                return dist
        # try to download/install
        return self.obtain(req, installer)

    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)

        Obtain a distro that matches requirement (e.g. via download).  In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead.  This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)

    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            if self[key]:
                yield key

    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other, Distribution):
            self.add(other)
        elif isinstance(other, Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self

    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # The result is unfiltered (platform=None, python=None) so that
        # everything from both operands is accepted.
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
+
+
# XXX backward compatibility: old public name for Environment
AvailableDistributions = Environment
+
+
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource

    The following attributes are available from instances of this exception:

    manager
        The resource manager that raised this exception

    cache_path
        The base directory for resource extraction

    original_error
        The exception instance that caused extraction to fail
    """
    # Constructed and raised by ResourceManager.extraction_error(), which
    # sets the three attributes documented above.
+
+
+class ResourceManager:
+    """Manage resource extraction and packages"""
+    extraction_path = None
+
    def __init__(self):
        # NOTE(review): presumably records paths of files this manager has
        # extracted (dict used as a set) — confirm against the cleanup
        # helpers later in this class.
        self.cached_files = {}
+
+    def resource_exists(self, package_or_requirement, resource_name):
+        """Does the named resource exist?"""
+        return get_provider(package_or_requirement).has_resource(resource_name)
+
+    def resource_isdir(self, package_or_requirement, resource_name):
+        """Is the named resource an existing directory?"""
+        return get_provider(package_or_requirement).resource_isdir(
+            resource_name
+        )
+
+    def resource_filename(self, package_or_requirement, resource_name):
+        """Return a true filesystem path for specified resource"""
+        return get_provider(package_or_requirement).get_resource_filename(
+            self, resource_name
+        )
+
+    def resource_stream(self, package_or_requirement, resource_name):
+        """Return a readable file-like object for specified resource"""
+        return get_provider(package_or_requirement).get_resource_stream(
+            self, resource_name
+        )
+
+    def resource_string(self, package_or_requirement, resource_name):
+        """Return specified resource as a string"""
+        return get_provider(package_or_requirement).get_resource_string(
+            self, resource_name
+        )
+
+    def resource_listdir(self, package_or_requirement, resource_name):
+        """List the contents of the named resource directory"""
+        return get_provider(package_or_requirement).resource_listdir(
+            resource_name
+        )
+
+    def extraction_error(self):
+        """Give an error message for problems extracting file(s)"""
+
+        old_exc = sys.exc_info()[1]
+        cache_path = self.extraction_path or get_default_cache()
+
+        tmpl = textwrap.dedent("""
+            Can't extract file(s) to egg cache
+
+            The following error occurred while trying to extract file(s)
+            to the Python egg cache:
+
+              {old_exc}
+
+            The Python egg cache directory is currently set to:
+
+              {cache_path}
+
+            Perhaps your account does not have write access to this directory?
+            You can change the cache directory by setting the PYTHON_EGG_CACHE
+            environment variable to point to an accessible directory.
+            """).lstrip()
+        err = ExtractionError(tmpl.format(**locals()))
+        err.manager = self
+        err.cache_path = cache_path
+        err.original_error = old_exc
+        raise err
+
+    def get_cache_path(self, archive_name, names=()):
+        """Return absolute location in cache for `archive_name` and `names`
+
+        The parent directory of the resulting path will be created if it does
+        not already exist.  `archive_name` should be the base filename of the
+        enclosing egg (which may not be the name of the enclosing zipfile!),
+        including its ".egg" extension.  `names`, if provided, should be a
+        sequence of path name parts "under" the egg's extraction location.
+
+        This method should only be called by resource providers that need to
+        obtain an extraction location, and only for names they intend to
+        extract, as it tracks the generated names for possible cleanup later.
+        """
+        extract_path = self.extraction_path or get_default_cache()
+        target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
+        try:
+            _bypass_ensure_directory(target_path)
+        except Exception:
+            self.extraction_error()
+
+        self._warn_unsafe_extraction_path(extract_path)
+
+        self.cached_files[target_path] = 1
+        return target_path
+
+    @staticmethod
+    def _warn_unsafe_extraction_path(path):
+        """
+        If the default extraction path is overridden and set to an insecure
+        location, such as /tmp, it opens up an opportunity for an attacker to
+        replace an extracted file with an unauthorized payload. Warn the user
+        if a known insecure location is used.
+
+        See Distribute #375 for more details.
+        """
+        if os.name == 'nt' and not path.startswith(os.environ['windir']):
+            # On Windows, permissions are generally restrictive by default
+            #  and temp directories are not writable by other users, so
+            #  bypass the warning.
+            return
+        mode = os.stat(path).st_mode
+        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
+            msg = (
+                "%s is writable by group/others and vulnerable to attack "
+                "when "
+                "used with get_resource_filename. Consider a more secure "
+                "location (set with .set_extraction_path or the "
+                "PYTHON_EGG_CACHE environment variable)." % path
+            )
+            warnings.warn(msg, UserWarning)
+
+    def postprocess(self, tempname, filename):
+        """Perform any platform-specific postprocessing of `tempname`
+
+        This is where Mac header rewrites should be done; other platforms don't
+        have anything special they should do.
+
+        Resource providers should call this method ONLY after successfully
+        extracting a compressed resource.  They must NOT call it on resources
+        that are already in the filesystem.
+
+        `tempname` is the current (temporary) name of the file, and `filename`
+        is the name it will be renamed to by the caller after this routine
+        returns.
+        """
+
+        if os.name == 'posix':
+            # Make the resource executable
+            mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
+            os.chmod(tempname, mode)
+
+    def set_extraction_path(self, path):
+        """Set the base path where resources will be extracted to, if needed.
+
+        If you do not call this routine before any extractions take place, the
+        path defaults to the return value of ``get_default_cache()``.  (Which
+        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
+        platform-specific fallbacks.  See that routine's documentation for more
+        details.)
+
+        Resources are extracted to subdirectories of this path based upon
+        information given by the ``IResourceProvider``.  You may set this to a
+        temporary directory, but then you must call ``cleanup_resources()`` to
+        delete the extracted files when done.  There is no guarantee that
+        ``cleanup_resources()`` will be able to remove all extracted files.
+
+        (Note: you may not change the extraction path for a given resource
+        manager once resources have been extracted, unless you first call
+        ``cleanup_resources()``.)
+        """
+        if self.cached_files:
+            raise ValueError(
+                "Can't change extraction path, files already extracted"
+            )
+
+        self.extraction_path = path
+
+    def cleanup_resources(self, force=False):
+        """
+        Delete all extracted resource files and directories, returning a list
+        of the file and directory names that could not be successfully removed.
+        This function does not have any concurrency protection, so it should
+        generally only be called when the extraction path is a temporary
+        directory exclusive to a single process.  This method is not
+        automatically called; you must call it explicitly or register it as an
+        ``atexit`` function if you wish to ensure cleanup of a temporary
+        directory used for extractions.
+        """
+        # XXX
+
+
def get_default_cache():
    """
    Return the egg-cache directory.

    The ``PYTHON_EGG_CACHE`` environment variable wins when set and
    non-empty; otherwise fall back to the platform-appropriate user
    cache directory for an app named "Python-Eggs".
    """
    env_cache = os.environ.get('PYTHON_EGG_CACHE')
    if env_cache:
        return env_cache
    return appdirs.user_cache_dir(appname='Python-Eggs')
+
+
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name

    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    # Collapse each run of disallowed characters into one dash.
    collapsed = re.sub('[^A-Za-z0-9.]+', '-', name)
    return collapsed
+
+
def safe_version(version):
    """
    Convert an arbitrary string to a standard version string
    """
    try:
        # Prefer full PEP 440 normalization when the version parses.
        return str(packaging.version.Version(version))
    except packaging.version.InvalidVersion:
        # Lossy fallback for legacy, non-conforming version strings.
        sanitized = version.replace(' ', '.')
        return re.sub('[^A-Za-z0-9.]+', '-', sanitized)
+
+
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name

    Any runs of non-alphanumeric characters are replaced with a single '_',
    and the result is always lowercased.
    """
    normalized = re.sub('[^A-Za-z0-9.-]+', '_', extra)
    return normalized.lower()
+
+
def to_filename(name):
    """Convert a project or version name to its filename-escaped form

    Any '-' characters are currently replaced with '_'.
    """
    return '_'.join(name.split('-'))
+
+
def invalid_marker(text):
    """
    Validate text as a PEP 508 environment marker; return an exception
    if invalid or False otherwise.
    """
    try:
        evaluate_marker(text)
    except SyntaxError as err:
        # Strip location info, which is meaningless for a marker string.
        err.filename = None
        err.lineno = None
        return err
    return False
+
+
def evaluate_marker(text, extra=None):
    """
    Evaluate a PEP 508 environment marker.
    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.

    This implementation uses the 'pyparsing' module.
    """
    try:
        return packaging.markers.Marker(text).evaluate()
    except packaging.markers.InvalidMarker as err:
        # Present marker problems uniformly as SyntaxError to callers.
        raise SyntaxError(err)
+
+
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""

    # egg_name/egg_info stay None unless a subclass locates an enclosing
    # egg; see EggProvider._setup_prefix.
    egg_name = None
    egg_info = None
    loader = None

    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))

    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)

    def get_resource_stream(self, manager, resource_name):
        # Wrap the whole resource in memory; subclasses may stream instead.
        return io.BytesIO(self.get_resource_string(manager, resource_name))

    def get_resource_string(self, manager, resource_name):
        # Despite the name, returns whatever the loader's get_data()
        # produces for the resource path.
        return self._get(self._fn(self.module_path, resource_name))

    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))

    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info, name))

    def get_metadata(self, name):
        if not self.egg_info:
            return ""
        value = self._get(self._fn(self.egg_info, name))
        # Metadata is text: decode on Python 3, keep the raw str on Python 2.
        return value.decode('utf-8') if six.PY3 else value

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))

    def resource_isdir(self, resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))

    def metadata_isdir(self, name):
        return self.egg_info and self._isdir(self._fn(self.egg_info, name))

    def resource_listdir(self, resource_name):
        return self._listdir(self._fn(self.module_path, resource_name))

    def metadata_listdir(self, name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info, name))
        return []

    def run_script(self, script_name, namespace):
        # Execute a script stored under the egg-info 'scripts/' area in
        # the given namespace dict.
        script = 'scripts/' + script_name
        if not self.has_metadata(script):
            raise ResolutionError(
                "Script {script!r} not found in metadata at {self.egg_info!r}"
                .format(**locals()),
            )
        # Normalize Windows/Mac line endings before compiling.
        script_text = self.get_metadata(script).replace('\r\n', '\n')
        script_text = script_text.replace('\r', '\n')
        script_filename = self._fn(self.egg_info, script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            # Script exists as a real file: compile from disk so tracebacks
            # point at the actual source.
            source = open(script_filename).read()
            code = compile(source, script_filename, 'exec')
            exec(code, namespace, namespace)
        else:
            # Script only exists inside an archive: seed linecache so
            # tracebacks can still display source lines.
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text, script_filename, 'exec')
            exec(script_code, namespace, namespace)

    # The _has/_isdir/_listdir primitives must be supplied by subclasses
    # that know the loader's storage backend.
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _fn(self, base, resource_name):
        # Map a '/'-separated resource name to a path under `base`.
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base

    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )
+
+
+register_loader_type(object, NullProvider)
+
+
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""

    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()

    def _setup_prefix(self):
        # we assume here that our metadata may be nested inside a "basket"
        # of multiple eggs; that's why we use module_path instead of .archive
        path = self.module_path
        old = None
        # Walk up one path component at a time until an egg path is found
        # or the path cannot be split any further (path == old at the root).
        while path != old:
            if _is_egg_path(path):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                break
            old = path
            path, base = os.path.split(path)
+
+
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""

    def _has(self, path):
        return os.path.exists(path)

    def _isdir(self, path):
        return os.path.isdir(path)

    def _listdir(self, path):
        return os.listdir(path)

    def get_resource_stream(self, manager, resource_name):
        # Stream directly from disk instead of buffering the whole
        # resource through get_resource_string as NullProvider does.
        return open(self._fn(self.module_path, resource_name), 'rb')

    def _get(self, path):
        with open(path, 'rb') as stream:
            return stream.read()

    @classmethod
    def _register(cls):
        # SourcelessFileLoader covers .pyc-only modules (changelog #1337).
        # Missing names fall back to type(None) so registration stays inert
        # rather than failing on older importlib variants.
        loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
        for name in loader_names:
            loader_cls = getattr(importlib_machinery, name, type(None))
            register_loader_type(loader_cls, cls)
+
+
+DefaultProvider._register()
+
+
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""

    module_path = None

    # Nothing exists and nothing is a directory.
    _isdir = _has = lambda self, path: False

    def _get(self, path):
        return ''

    def _listdir(self, path):
        return []

    def __init__(self):
        # Deliberately skips NullProvider.__init__: there is no module.
        pass
+
+
# Shared EmptyProvider instance; safe to reuse since it holds no state.
empty_provider = EmptyProvider()
+
+
class ZipManifests(dict):
    """
    A dict of zip manifests, with a builder classmethod.
    """

    @classmethod
    def build(cls, path):
        """
        Build a dictionary similar to the zipimport directory
        caches, except instead of tuples, store ZipInfo objects.

        Keys use the platform path separator (os.sep) for
        compatibility with pypy on Windows.
        """
        with zipfile.ZipFile(path) as zfile:
            return {
                name.replace('/', os.sep): zfile.getinfo(name)
                for name in zfile.namelist()
            }

    load = build
+
+
class MemoizedZipManifests(ZipManifests):
    """
    Memoized zipfile manifests.
    """
    # Cache entry: the parsed manifest plus the archive mtime it was built
    # from, so stale entries can be detected.
    manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')

    def load(self, path):
        """
        Load a manifest at path or return a suitable manifest already loaded.
        """
        path = os.path.normpath(path)
        mtime = os.stat(path).st_mtime

        # Rebuild when unseen or when the archive changed on disk.
        if path not in self or self[path].mtime != mtime:
            manifest = self.build(path)
            self[path] = self.manifest_mod(manifest, mtime)

        return self[path].manifest
+
+
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""

    eagers = None
    # Class-level cache of zip manifests, keyed by archive path.
    _zip_manifests = MemoizedZipManifests()

    def __init__(self, module):
        EggProvider.__init__(self, module)
        # Prefix (archive path + separator) common to every virtual path
        # inside this archive.
        self.zip_pre = self.loader.archive + os.sep

    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        fspath = fspath.rstrip(os.sep)
        if fspath == self.loader.archive:
            return ''
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.zip_pre)
        )

    def _parts(self, zip_path):
        # Convert a zipfile subpath into an egg-relative path part list.
        # pseudo-fs path
        fspath = self.zip_pre + zip_path
        if fspath.startswith(self.egg_root + os.sep):
            return fspath[len(self.egg_root) + 1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.egg_root)
        )

    @property
    def zipinfo(self):
        # Manifest of ZipInfo objects for this archive (memoized by mtime).
        return self._zip_manifests.load(self.loader.archive)

    def get_resource_filename(self, manager, resource_name):
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            # Requesting any eager resource extracts the entire eager set.
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)

    @staticmethod
    def _get_date_and_size(zip_stat):
        size = zip_stat.file_size
        # ymdhms+wday, yday, dst
        date_time = zip_stat.date_time + (0, 0, -1)
        # 1980 offset already done
        timestamp = time.mktime(date_time)
        return timestamp, size

    def _extract_resource(self, manager, zip_path):

        # A key in the directory index means zip_path is a directory:
        # extract its contents recursively, then return the directory name.
        if zip_path in self._index():
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            # return the extracted directory name
            return os.path.dirname(last)

        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])

        if not WRITE_SUPPORT:
            raise IOError('"os.rename" and "os.unlink" are not supported '
                          'on this platform')
        try:

            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )

            if self._is_current(real_path, zip_path):
                return real_path

            # Write to a unique temp name, then rename into place, so
            # concurrent extractors never observe a partially-written file.
            outf, tmpnam = _mkstemp(
                ".$extract",
                dir=os.path.dirname(real_path),
            )
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            # Preserve the archive timestamp so _is_current can compare.
            utime(tmpnam, (timestamp, timestamp))
            manager.postprocess(tmpnam, real_path)

            try:
                rename(tmpnam, real_path)

            except os.error:
                if os.path.isfile(real_path):
                    if self._is_current(real_path, zip_path):
                        # the file became current since it was checked above,
                        #  so proceed.
                        return real_path
                    # Windows, del old file and retry
                    elif os.name == 'nt':
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise

        except os.error:
            # report a user-friendly error
            manager.extraction_error()

        return real_path

    def _is_current(self, file_path, zip_path):
        """
        Return True if the file_path is current for this zip_path
        """
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not os.path.isfile(file_path):
            return False
        stat = os.stat(file_path)
        if stat.st_size != size or stat.st_mtime != timestamp:
            return False
        # check that the contents match
        zip_contents = self.loader.get_data(zip_path)
        with open(file_path, 'rb') as f:
            file_contents = f.read()
        return zip_contents == file_contents

    def _get_eager_resources(self):
        # Names listed in native_libs.txt / eager_resources.txt must all be
        # extracted together; cache the combined list on first use.
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers

    def _index(self):
        # Lazily build (and cache) a mapping of directory subpath -> list of
        # child names, derived from the archive manifest keys.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind

    def _has(self, fspath):
        # A name exists if it is a file in the manifest or a known directory.
        zip_path = self._zipinfo_name(fspath)
        return zip_path in self.zipinfo or zip_path in self._index()

    def _isdir(self, fspath):
        return self._zipinfo_name(fspath) in self._index()

    def _listdir(self, fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))

    def _eager_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.egg_root, resource_name))

    def _resource_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.module_path, resource_name))
+
+
+register_loader_type(zipimport.zipimporter, ZipProvider)
+
+
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    """

    def __init__(self, path):
        # Path to the standalone PKG-INFO file.
        self.path = path

    def has_metadata(self, name):
        # Only PKG-INFO "exists", and only while the backing file does.
        return name == 'PKG-INFO' and os.path.isfile(self.path)

    def get_metadata(self, name):
        if name != 'PKG-INFO':
            raise KeyError("No metadata except PKG-INFO is available")

        with io.open(self.path, encoding='utf-8', errors="replace") as f:
            content = f.read()
        self._warn_on_replacement(content)
        return content

    def _warn_on_replacement(self, metadata):
        # Python 2.7 compat for: replacement_char = '�'
        replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
        if replacement_char in metadata:
            tmpl = "{self.path} could not be properly decoded in UTF-8"
            msg = tmpl.format(**locals())
            warnings.warn(msg)

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
+
+
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories

    Usage::

        # Development eggs:

        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir, project_name=dist_name, metadata=metadata)

        # Unpacked egg directories:

        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """

    def __init__(self, path, egg_info):
        # Bypasses DefaultProvider.__init__ (and its egg-prefix discovery):
        # both locations are supplied explicitly.
        self.module_path = path
        self.egg_info = egg_info
+
+
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""

    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""

        # Bypasses ZipProvider.__init__/EggProvider.__init__, since there is
        # no module object: state is derived from the importer instead.
        self.zip_pre = importer.archive + os.sep
        self.loader = importer
        if importer.prefix:
            # Importer rooted at a subdirectory within the archive.
            self.module_path = os.path.join(importer.archive, importer.prefix)
        else:
            self.module_path = importer.archive
        self._setup_prefix()
+
+
+_declare_state('dict', _distribution_finders={})
+
+
def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item.  See ``pkg_resources.find_on_path`` for an example."""
    # A later registration for the same importer type replaces the earlier one.
    _distribution_finders[importer_type] = distribution_finder
+
+
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    importer = get_importer(path_item)
    # Dispatch to the finder registered for this importer's type.
    finder = _find_adapter(_distribution_finders, importer)
    return finder(importer, path_item, only)
+
+
def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    if importer.archive.endswith('.whl'):
        # wheels are not supported with this finder
        # they don't have PKG-INFO metadata, and won't ever contain eggs
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        # The archive itself is an egg.
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        # don't yield nested distros
        return
    for subitem in metadata.resource_listdir('/'):
        if _is_egg_path(subitem):
            # Recurse into eggs nested inside this archive.
            subpath = os.path.join(path_item, subitem)
            dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
            for dist in dists:
                yield dist
        elif subitem.lower().endswith('.dist-info'):
            # Nested .dist-info directory: point the metadata provider at it.
            subpath = os.path.join(path_item, subitem)
            submeta = EggMetadata(zipimport.zipimporter(subpath))
            submeta.egg_info = subpath
            yield Distribution.from_location(path_item, subitem, submeta)
+
+
+register_finder(zipimport.zipimporter, find_eggs_in_zip)
+
+
def find_nothing(importer, path_item, only=False):
    """Finder that yields no distributions, for unsupported importers."""
    return ()
+
+
+register_finder(object, find_nothing)
+
+
def _by_version_descending(names):
    """
    Given a list of filenames, return them in descending order
    by version number.

    >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
    >>> _by_version_descending(names)
    ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
    >>> _by_version_descending(names)
    ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
    >>> _by_version_descending(names)
    ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
    """
    def _by_version(name):
        """
        Parse each component of the filename
        """
        name, ext = os.path.splitext(name)
        # Treat dash-separated pieces (plus the extension) as versions;
        # non-version pieces are handled by packaging.version.parse's
        # fallback parsing, as the doctests above demonstrate.
        parts = itertools.chain(name.split('-'), [ext])
        return [packaging.version.parse(part) for part in parts]

    return sorted(names, key=_by_version, reverse=True)
+
+
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)

    if _is_unpacked_egg(path_item):
        # The path entry itself is an unpacked egg: yield it directly.
        yield Distribution.from_filename(
            path_item, metadata=PathMetadata(
                path_item, os.path.join(path_item, 'EGG-INFO')
            )
        )
        return

    entries = safe_listdir(path_item)

    # for performance, before sorting by version,
    # screen entries for only those that will yield
    # distributions
    filtered = (
        entry
        for entry in entries
        if dist_factory(path_item, entry, only)
    )

    # scan for .egg and .egg-info in directory
    path_item_entries = _by_version_descending(filtered)
    for entry in path_item_entries:
        fullpath = os.path.join(path_item, entry)
        # dist_factory is re-invoked here; it performs only cheap string
        # checks, so the double call is inexpensive.
        factory = dist_factory(path_item, entry, only)
        for dist in factory(fullpath):
            yield dist
+
+
def dist_factory(path_item, entry, only):
    """
    Return a dist_factory for a path_item and entry

    The factory is a callable taking the entry's full path and yielding
    Distributions; a falsy NoDists() is returned for entries that cannot
    yield any.
    """
    lower = entry.lower()
    if lower.endswith(('.egg-info', '.dist-info')):
        return distributions_from_metadata
    if not only and _is_egg_path(entry):
        return find_distributions
    if not only and lower.endswith('.egg-link'):
        return resolve_egg_link
    return NoDists()
+
+
class NoDists:
    """
    >>> bool(NoDists())
    False

    >>> list(NoDists()('anything'))
    []
    """
    def __bool__(self):
        return False
    if six.PY2:
        # Python 2 spells the boolean protocol __nonzero__.
        __nonzero__ = __bool__

    def __call__(self, fullpath):
        # A factory that finds nothing: empty iterator for any path.
        return iter(())
+
+
def safe_listdir(path):
    """
    Attempt to list contents of path, but suppress some exceptions.
    """
    try:
        return os.listdir(path)
    except (PermissionError, NotADirectoryError):
        pass
    except OSError as exc:
        # Nonexistent paths, non-directories, and permission problems are
        # expected when scanning arbitrary sys.path entries; anything else
        # propagates.  winerror 267 is Python 2 on Windows's spelling of
        # "not a directory".
        expected = exc.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
        if not expected and getattr(exc, "winerror", None) != 267:
            raise
    return ()
+
+
def distributions_from_metadata(path):
    """Yield a develop-precedence Distribution for metadata at `path`.

    `path` is either a ``*.egg-info``/``*.dist-info`` directory or a
    standalone metadata file.
    """
    root = os.path.dirname(path)
    if os.path.isdir(path):
        if len(os.listdir(path)) == 0:
            # empty metadata dir; skip
            return
        metadata = PathMetadata(root, path)
    else:
        metadata = FileMetadata(path)
    entry = os.path.basename(path)
    yield Distribution.from_location(
        root, entry, metadata, precedence=DEVELOP_DIST,
    )
+
+
def non_empty_lines(path):
    """
    Yield non-empty lines from file at path
    """
    # Generator: the file is opened lazily, on first iteration.
    with open(path) as stream:
        for raw in stream:
            text = raw.strip()
            if text:
                yield text
+
+
def resolve_egg_link(path):
    """
    Given a path to an .egg-link, resolve distributions
    present in the referenced path.
    """
    # Each non-blank line in the .egg-link is a path relative to the
    # link file's directory; the first that yields distributions wins.
    base = os.path.dirname(path)
    resolved = (
        os.path.join(base, ref)
        for ref in non_empty_lines(path)
    )
    return next(map(find_distributions, resolved), ())
+
+
# Directory sys.path entries (classic ImpImporter) use find_on_path.
register_finder(pkgutil.ImpImporter, find_on_path)

# On Python 3, FileFinder handles directory entries; guard for importlib
# variants lacking the attribute.
if hasattr(importlib_machinery, 'FileFinder'):
    register_finder(importlib_machinery.FileFinder, find_on_path)

# Registries for namespace-package handling; see
# register_namespace_handler and _handle_ns below.
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
+
+
def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `namespace_handler` is a callable like this::

        def namespace_handler(importer, path_entry, moduleName, module):
            # return a path_entry to use for child packages

    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath.  For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    # Keyed by importer type; looked up via _find_adapter in _handle_ns.
    _namespace_handlers[importer_type] = namespace_handler
+
+
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""

    importer = get_importer(path_item)
    if importer is None:
        return None
    # find_module is the legacy PEP 302 API -- kept here, presumably for
    # Python 2 compatibility
    loader = importer.find_module(packageName)
    if loader is None:
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # first sighting: create a bare module with an empty __path__
        # and attach it to its parent package
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []
        _set_parent_ns(packageName)
    elif not hasattr(module, '__path__'):
        raise TypeError("Not a package:", packageName)
    # dispatch to the handler registered for this importer's type
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer, path_item, packageName, module)
    if subpath is not None:
        path = module.__path__
        path.append(subpath)
        loader.load_module(packageName)
        # keep __path__ entries ordered consistently with sys.path
        _rebuild_mod_path(path, packageName, module)
    return subpath
+
+
def _rebuild_mod_path(orig_path, package_name, module):
    """
    Rebuild module.__path__ ensuring that all entries are ordered
    corresponding to their sys.path order
    """
    sys_path = [_normalize_cached(p) for p in sys.path]

    def safe_sys_path_index(entry):
        """
        Workaround for #520 and #513.
        """
        # entries not on sys.path sort after everything that is
        try:
            return sys_path.index(entry)
        except ValueError:
            return float('inf')

    def position_in_sys_path(path):
        """
        Return the ordinal of the path based on its position in sys.path
        """
        # strip the trailing package components to recover the sys.path
        # entry this __path__ entry came from
        path_parts = path.split(os.sep)
        module_parts = package_name.count('.') + 1
        parts = path_parts[:-module_parts]
        return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))

    if not isinstance(orig_path, list):
        # Is this behavior useful when module.__path__ is not a list?
        return

    # Python's sort is stable, so ties keep their original relative order
    orig_path.sort(key=position_in_sys_path)
    module.__path__[:] = [_normalize_cached(p) for p in orig_path]
+
+
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""

    # hold the import lock: this mutates sys.modules and the global
    # namespace-package registry
    _imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            return

        path, parent = sys.path, None
        if '.' in packageName:
            # recursively declare (and if needed import) the parent first,
            # so its __path__ is available below
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)

        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent, []).append(packageName)
        _namespace_packages.setdefault(packageName, [])

        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)

    finally:
        _imp.release_lock()
+
+
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    _imp.acquire_lock()
    try:
        # walk the registry depth-first: a new subpath for a package may
        # in turn expose subpaths for its child namespace packages
        for package in _namespace_packages.get(parent, ()):
            subpath = _handle_ns(package, path_item)
            if subpath:
                fixup_namespace_packages(subpath, package)
    finally:
        _imp.release_lock()
+
+
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""

    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    for item in module.__path__:
        if _normalize_cached(item) == normalized:
            # already present; fall through and return None implicitly
            break
    else:
        # Only return the path if it's not already there
        return subpath
+
+
# Filesystem and zipfile importers share the same namespace handler.
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)

# importlib.machinery.FileFinder exists on Python 3.3+
if hasattr(importlib_machinery, 'FileFinder'):
    register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
+
+
def null_ns_handler(importer, path_item, packageName, module):
    """Fallback handler: contribute no subpath for unrecognized importers."""
    return None


# registered for `object` so _find_adapter always finds *some* handler
register_namespace_handler(object, null_ns_handler)
+
+
def normalize_path(filename):
    """Normalize a file/dir name for comparison purposes"""
    # resolve symlinks first, then apply platform case-folding
    resolved = os.path.realpath(filename)
    return os.path.normcase(resolved)
+
+
def _normalize_cached(filename, _cache={}):
    # The mutable default argument is intentional: it is a process-wide
    # memo for normalize_path(), which touches the filesystem (realpath).
    try:
        return _cache[filename]
    except KeyError:
        _cache[filename] = result = normalize_path(filename)
        return result
+
+
+def _is_egg_path(path):
+    """
+    Determine if given path appears to be an egg.
+    """
+    return path.lower().endswith('.egg')
+
+
def _is_unpacked_egg(path):
    """
    Determine if given path appears to be an unpacked egg.

    True when the path is named like an egg and contains the
    EGG-INFO/PKG-INFO metadata file.
    """
    if not _is_egg_path(path):
        return False
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    return os.path.isfile(pkg_info)
+
+
+def _set_parent_ns(packageName):
+    parts = packageName.split('.')
+    name = parts.pop()
+    if parts:
+        parent = '.'.join(parts)
+        setattr(sys.modules[parent], name, sys.modules[packageName])
+
+
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a string or sequence"""
    if isinstance(strs, six.string_types):
        stripped = (line.strip() for line in strs.splitlines())
        for line in stripped:
            # skip blank lines/comments
            if line and not line.startswith('#'):
                yield line
    else:
        # a (possibly nested) iterable of strings: recurse into each item
        for item in strs:
            for line in yield_lines(item):
                yield line
+
+
# Matches a dotted module/package name, e.g. "pkg.subpkg.mod".
MODULE = re.compile(r"\w+(\.\w+)*$").match
# Parses egg-style basenames of the form name-version-pyX.Y-platform;
# every component after the project name is optional.
EGG_NAME = re.compile(
    r"""
    (?P<name>[^-]+) (
        -(?P<ver>[^-]+) (
            -py(?P<pyver>[^-]+) (
                -(?P<plat>.+)
            )?
        )?
    )?
    """,
    re.VERBOSE | re.IGNORECASE,
).match
+
+
class EntryPoint(object):
    """Object representing an advertised importable object"""

    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        # Validate the module name eagerly; a bad name would otherwise
        # only surface much later, at resolve() time.
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        self.extras = tuple(extras)
        self.dist = dist

    def __str__(self):
        # Render in the canonical "name = module:attrs [extras]" form,
        # the inverse of parse().
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s

    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)

    def load(self, require=True, *args, **kwargs):
        """
        Require packages for this EntryPoint, then resolve it.

        Passing ``require=False`` or any extra arguments is deprecated;
        call .resolve() and .require() separately instead.
        """
        if not require or args or kwargs:
            warnings.warn(
                "Parameters to load are deprecated.  Call .resolve and "
                ".require separately.",
                DeprecationWarning,
                stacklevel=2,
            )
        if require:
            self.require(*args, **kwargs)
        return self.resolve()

    def resolve(self):
        """
        Resolve the entry point from its module and attrs.
        """
        # fromlist makes __import__ return the leaf module instead of the
        # top-level package
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            # walk the dotted attribute chain, e.g. "Class.method"
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            raise ImportError(str(exc))

    def require(self, env=None, installer=None):
        """Resolve and activate this entry point's requirements/extras."""
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)

        # Get the requirements for this entry point with all its extras and
        # then resolve them. We have to pass `extras` along when resolving so
        # that the working set knows what extras we want. Otherwise, for
        # dist-info distributions, the working set will assume that the
        # requirements for that extra are purely optional and skip over them.
        reqs = self.dist.requires(self.extras)
        items = working_set.resolve(reqs, env, installer, extras=self.extras)
        list(map(working_set.add, items))

    # Grammar for "name = module:attrs [extras]" entry-point specs.
    pattern = re.compile(
        r'\s*'
        r'(?P<name>.+?)\s*'
        r'=\s*'
        r'(?P<module>[\w.]+)\s*'
        r'(:\s*(?P<attr>[\w.]+))?\s*'
        r'(?P<extras>\[.*\])?\s*$'
    )

    @classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`

        Entry point syntax follows the form::

            name = some.module:some.attr [extra1, extra2]

        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        m = cls.pattern.match(src)
        if not m:
            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
            raise ValueError(msg, src)
        res = m.groupdict()
        extras = cls._parse_extras(res['extras'])
        attrs = res['attr'].split('.') if res['attr'] else ()
        return cls(res['name'], res['module'], attrs, extras, dist)

    @classmethod
    def _parse_extras(cls, extras_spec):
        # Reuse the Requirement parser by prefixing a dummy project name;
        # version specifiers are not valid inside an extras list.
        if not extras_spec:
            return ()
        req = Requirement.parse('x' + extras_spec)
        if req.specs:
            raise ValueError()
        return req.extras

    @classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name] = ep
        return this

    @classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        if isinstance(data, dict):
            data = data.items()
        else:
            # INI-style text: "[group]" headers followed by entry lines
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
+
+
+def _remove_md5_fragment(location):
+    if not location:
+        return ''
+    parsed = urllib.parse.urlparse(location)
+    if parsed[-1].startswith('md5='):
+        return urllib.parse.urlunparse(parsed[:-1] + ('',))
+    return location
+
+
def _version_from_file(lines):
    """
    Given an iterable of lines from a Metadata file, return
    the value of the Version field, if present, or None otherwise.
    """
    def is_version_line(line):
        return line.lower().startswith('version:')
    version_lines = filter(is_version_line, lines)
    # only the first 'Version:' line is considered
    line = next(iter(version_lines), '')
    _, _, value = line.partition(':')
    return safe_version(value.strip()) or None
+
+
class Distribution(object):
    """Wrap an actual or potential sys.path entry w/metadata"""
    # Name of the file carrying the core metadata; subclasses override
    # (DistInfoDistribution uses 'METADATA').
    PKG_INFO = 'PKG-INFO'

    def __init__(
            self, location=None, metadata=None, project_name=None,
            version=None, py_version=PY_MAJOR, platform=None,
            precedence=EGG_DIST):
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        # fall back to an empty provider so metadata queries fail soft
        self._provider = metadata or empty_provider

    @classmethod
    def from_location(cls, location, basename, metadata=None, **kw):
        """Create a Distribution of the appropriate subclass for `basename`.

        The subclass is selected by the basename's extension
        (.egg/.egg-info/.dist-info); name, version, python version, and
        platform are parsed from egg-style basenames when possible.
        """
        project_name, version, py_version, platform = [None] * 4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in _distributionImpl:
            cls = _distributionImpl[ext.lower()]

            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name', 'ver', 'pyver', 'plat'
                )
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )._reload_version()

    def _reload_version(self):
        # hook for subclasses to re-derive the version from metadata
        return self

    @property
    def hashcmp(self):
        # tuple used for hashing and ordering; parsed_version comes first
        # so distributions sort primarily by version
        return (
            self.parsed_version,
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version or '',
            self.platform or '',
        )

    def __hash__(self):
        return hash(self.hashcmp)

    def __lt__(self, other):
        return self.hashcmp < other.hashcmp

    def __le__(self, other):
        return self.hashcmp <= other.hashcmp

    def __gt__(self, other):
        return self.hashcmp > other.hashcmp

    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp

    def __ne__(self, other):
        return not self == other

    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed.  (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)

    @property
    def key(self):
        # lower-cased project name, cached after first access
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key

    @property
    def parsed_version(self):
        # cached parse of self.version (PEP 440 or legacy)
        if not hasattr(self, "_parsed_version"):
            self._parsed_version = parse_version(self.version)

        return self._parsed_version

    def _warn_legacy_version(self):
        """Emit a PEP440Warning when the version is a non-PEP 440 legacy one."""
        LV = packaging.version.LegacyVersion
        is_legacy = isinstance(self._parsed_version, LV)
        if not is_legacy:
            return

        # While an empty version is technically a legacy version and
        # is not a valid PEP 440 version, it's also unlikely to
        # actually come from someone and instead it is more likely that
        # it comes from setuptools attempting to parse a filename and
        # including it in the list. So for that we'll gate this warning
        # on if the version is anything at all or not.
        if not self.version:
            return

        tmpl = textwrap.dedent("""
            '{project_name} ({version})' is being parsed as a legacy,
            non PEP 440,
            version. You may find odd behavior and sort order.
            In particular it will be sorted as less than 0.0. It
            is recommended to migrate to PEP 440 compatible
            versions.
            """).strip().replace('\n', ' ')

        warnings.warn(tmpl.format(**vars(self)), PEP440Warning)

    @property
    def version(self):
        # prefer the explicitly-supplied version; otherwise read it from
        # the metadata file (PKG-INFO / METADATA)
        try:
            return self._version
        except AttributeError:
            version = _version_from_file(self._get_metadata(self.PKG_INFO))
            if version is None:
                tmpl = "Missing 'Version:' header and/or %s file"
                raise ValueError(tmpl % self.PKG_INFO, self)
            return version

    @property
    def _dep_map(self):
        """
        A map of extra to its list of (direct) requirements
        for this distribution, including the null extra.
        """
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._filter_extras(self._build_dep_map())
        return self.__dep_map

    @staticmethod
    def _filter_extras(dm):
        """
        Given a mapping of extras to dependencies, strip off
        environment markers and filter out any dependencies
        not matching the markers.
        """
        for extra in list(filter(None, dm)):
            new_extra = extra
            reqs = dm.pop(extra)
            # extras may be spelled "name:marker"; split the marker off
            new_extra, _, marker = extra.partition(':')
            fails_marker = marker and (
                invalid_marker(marker)
                or not evaluate_marker(marker)
            )
            if fails_marker:
                reqs = []
            new_extra = safe_extra(new_extra) or None

            dm.setdefault(new_extra, []).extend(reqs)
        return dm

    def _build_dep_map(self):
        # collect requirements from both historical metadata file names
        dm = {}
        for name in 'requires.txt', 'depends.txt':
            for extra, reqs in split_sections(self._get_metadata(name)):
                dm.setdefault(extra, []).extend(parse_requirements(reqs))
        return dm

    def requires(self, extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None, ()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                )
        return deps

    def _get_metadata(self, name):
        # yield the lines of metadata file `name`, or nothing if absent
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line

    def activate(self, path=None, replace=False):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None:
            path = sys.path
        self.insert_on(path, replace=replace)
        if path is sys.path:
            # also wire up any namespace packages this distribution declares
            fixup_namespace_packages(self.location)
            for pkg in self._get_metadata('namespace_packages.txt'):
                if pkg in sys.modules:
                    declare_namespace(pkg)

    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )

        if self.platform:
            filename += '-' + self.platform
        return filename

    def __repr__(self):
        if self.location:
            return "%s (%s)" % (self, self.location)
        else:
            return str(self)

    def __str__(self):
        try:
            version = getattr(self, 'version', None)
        except ValueError:
            # version metadata missing or unparseable
            version = None
        version = version or "[unknown version]"
        return "%s %s" % (self.project_name, version)

    def __getattr__(self, attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            raise AttributeError(attr)
        return getattr(self._provider, attr)

    @classmethod
    def from_filename(cls, filename, metadata=None, **kw):
        # convenience wrapper around from_location with a normalized path
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )

    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly"""
        if isinstance(self.parsed_version, packaging.version.Version):
            spec = "%s==%s" % (self.project_name, self.parsed_version)
        else:
            # legacy versions need the arbitrary-equality (===) operator
            spec = "%s===%s" % (self.project_name, self.parsed_version)

        return Requirement.parse(spec)

    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError"""
        ep = self.get_entry_info(group, name)
        if ep is None:
            raise ImportError("Entry point %r not found" % ((group, name),))
        return ep.load()

    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            # parsed lazily from entry_points.txt and cached
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group, {})
        return ep_map

    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return self.get_entry_map(group).get(name)

    def insert_on(self, path, loc=None, replace=False):
        """Ensure self.location is on path

        If replace=False (default):
            - If location is already in path anywhere, do nothing.
            - Else:
              - If it's an egg and its parent directory is on path,
                insert just ahead of the parent.
              - Else: add to the end of path.
        If replace=True:
            - If location is already on path anywhere (not eggs)
              or higher priority than its parent (eggs)
              do nothing.
            - Else:
              - If it's an egg and its parent directory is on path,
                insert just ahead of the parent,
                removing any lower-priority entries.
              - Else: add it to the front of path.
        """

        loc = loc or self.location
        if not loc:
            return

        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        # normalized copy of `path` for comparison; '' entries kept as-is
        npath = [(p and _normalize_cached(p) or p) for p in path]

        for p, item in enumerate(npath):
            if item == nloc:
                if replace:
                    break
                else:
                    # don't modify path (even removing duplicates) if
                    # found and not replace
                    return
            elif item == bdir and self.precedence == EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                # UNLESS it's already been added to sys.path and replace=False
                if (not replace) and nloc in npath[p:]:
                    return
                if path is sys.path:
                    self.check_version_conflict()
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            if path is sys.path:
                self.check_version_conflict()
            if replace:
                path.insert(0, loc)
            else:
                path.append(loc)
            return

        # p is the spot where we found or inserted loc; now remove duplicates
        while True:
            try:
                np = npath.index(nloc, p + 1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                # ha!
                p = np

        return

    def check_version_conflict(self):
        """Warn when modules this distribution provides are already imported
        from a different location."""
        if self.key == 'setuptools':
            # ignore the inevitable setuptools self-conflicts  :(
            return

        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            if (modname not in sys.modules or modname in nsp
                    or modname in _namespace_packages):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or
                       fn.startswith(self.location)):
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )

    def has_version(self):
        # a missing/invalid Version header makes the egg unusable
        try:
            self.version
        except ValueError:
            issue_warning("Unbuilt egg for " + repr(self))
            return False
        return True

    def clone(self, **kw):
        """Copy this distribution, substituting in any changed keyword args"""
        names = 'project_name version py_version platform location precedence'
        for attr in names.split():
            kw.setdefault(attr, getattr(self, attr, None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)

    @property
    def extras(self):
        # the non-null keys of the dependency map are this dist's extras
        return [dep for dep in self._dep_map if dep]
+
+
class EggInfoDistribution(Distribution):
    def _reload_version(self):
        """
        Packages installed by distutils (e.g. numpy or scipy),
        which uses an old safe_version, and so
        their version numbers can get mangled when
        converted to filenames (e.g., 1.11.0.dev0+2329eae to
        1.11.0.dev0_2329eae). These distributions will not be
        parsed properly
        downstream by Distribution and safe_version, so
        take an extra step and try to get the version number from
        the metadata file itself instead of the filename.
        """
        md_version = _version_from_file(self._get_metadata(self.PKG_INFO))
        if md_version:
            # metadata wins over the (possibly mangled) filename version
            self._version = md_version
        return self
+
+
class DistInfoDistribution(Distribution):
    """
    Wrap an actual or potential sys.path entry
    w/metadata, .dist-info style.
    """
    # .dist-info stores core metadata in METADATA, not PKG-INFO
    PKG_INFO = 'METADATA'
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")

    @property
    def _parsed_pkg_info(self):
        """Parse and cache metadata"""
        try:
            return self._pkg_info
        except AttributeError:
            # METADATA is an email-header-style (RFC 822) document
            metadata = self.get_metadata(self.PKG_INFO)
            self._pkg_info = email.parser.Parser().parsestr(metadata)
            return self._pkg_info

    @property
    def _dep_map(self):
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map

    def _compute_dependencies(self):
        """Recompute this distribution's dependencies."""
        dm = self.__dep_map = {None: []}

        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            reqs.extend(parse_requirements(req))

        def reqs_for_extra(extra):
            # requirements whose markers hold for the given extra
            for req in reqs:
                if not req.marker or req.marker.evaluate({'extra': extra}):
                    yield req

        common = frozenset(reqs_for_extra(None))
        dm[None].extend(common)

        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            s_extra = safe_extra(extra.strip())
            # store only requirements unique to the extra
            dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)

        return dm
+
+
# Map metadata extensions to the Distribution subclass that understands
# them (consulted by Distribution.from_location).
_distributionImpl = {
    '.egg': Distribution,
    '.egg-info': EggInfoDistribution,
    '.dist-info': DistInfoDistribution,
}
+
+
def issue_warning(*args, **kw):
    """Issue a warning attributed to the first caller outside pkg_resources."""
    level = 1
    g = globals()
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(level).f_globals is g:
            level += 1
    except ValueError:
        # walked off the top of the stack; use the deepest level reached
        pass
    warnings.warn(stacklevel=level + 1, *args, **kw)
+
+
class RequirementParseError(ValueError):
    """Raised when a requirement string cannot be parsed."""
    def __str__(self):
        # assumes all args are strings -- callers here pass str(exc)
        return ' '.join(self.args)
+
+
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`

    `strs` must be a string, or a (possibly-nested) iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))

    for line in lines:
        # Drop comments -- a hash without a space may be in a URL.
        if ' #' in line:
            line = line[:line.find(' #')]
        # If there is a line continuation, drop it, and append the next line.
        if line.endswith('\\'):
            # NOTE(review): [:-2] removes the backslash AND the character
            # before it -- presumably relying on a space preceding the
            # backslash; confirm against upstream before changing.
            line = line[:-2].strip()
            try:
                line += next(lines)
            except StopIteration:
                return
        yield Requirement(line)
+
+
class Requirement(packaging.requirements.Requirement):
    def __init__(self, requirement_string):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        try:
            super(Requirement, self).__init__(requirement_string)
        except packaging.requirements.InvalidRequirement as e:
            # re-raise with the pkg_resources-specific exception type
            raise RequirementParseError(str(e))
        self.unsafe_name = self.name
        project_name = safe_name(self.name)
        self.project_name, self.key = project_name, project_name.lower()
        # legacy (operator, version) pairs derived from the specifier set
        self.specs = [
            (spec.operator, spec.version) for spec in self.specifier]
        self.extras = tuple(map(safe_extra, self.extras))
        # precomputed identity tuple backing __eq__ and __hash__
        self.hashCmp = (
            self.key,
            self.specifier,
            frozenset(self.extras),
            str(self.marker) if self.marker else None,
        )
        self.__hash = hash(self.hashCmp)

    def __eq__(self, other):
        return (
            isinstance(other, Requirement) and
            self.hashCmp == other.hashCmp
        )

    def __ne__(self, other):
        return not self == other

    def __contains__(self, item):
        # accepts either a Distribution (matched by key then version)
        # or a bare version
        if isinstance(item, Distribution):
            if item.key != self.key:
                return False

            item = item.version

        # Allow prereleases always in order to match the previous behavior of
        # this method. In the future this should be smarter and follow PEP 440
        # more accurately.
        return self.specifier.contains(item, prereleases=True)

    def __hash__(self):
        return self.__hash

    def __repr__(self):
        return "Requirement.parse(%r)" % str(self)

    @staticmethod
    def parse(s):
        """Parse a single requirement string into a Requirement."""
        req, = parse_requirements(s)
        return req
+
+
+def _always_object(classes):
+    """
+    Ensure object appears in the mro even
+    for old-style classes.
+    """
+    if object not in classes:
+        return classes + (object,)
+    return classes
+
+
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`

    Walks the MRO of `ob`'s class (most-derived first) and returns the
    first registered adapter; implicitly returns None when nothing in
    the MRO is registered.
    """
    cls = getattr(ob, '__class__', type(ob))
    for candidate in _always_object(inspect.getmro(cls)):
        if candidate in registry:
            return registry[candidate]
+
+
def ensure_directory(path):
    """Ensure that the parent directory of `path` exists"""
    dirname = os.path.dirname(path)
    # py31compat provides makedirs(exist_ok=...) on old Pythons
    py31compat.makedirs(dirname, exist_ok=True)
+
+
def _bypass_ensure_directory(path):
    """Sandbox-bypassing version of ensure_directory()"""
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    # split/isdir/mkdir are module-level aliases -- presumably bound to the
    # unsandboxed os functions at import time (defined elsewhere in file)
    dirname, filename = split(path)
    if dirname and filename and not isdir(dirname):
        # recursively create the parent chain first
        _bypass_ensure_directory(dirname)
        mkdir(dirname, 0o755)
+
+
def split_sections(s):
    """Split a string or iterable thereof into (section, content) pairs

    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines.  If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    section, content = None, []
    for line in yield_lines(s):
        if not line.startswith("["):
            content.append(line)
            continue
        if not line.endswith("]"):
            raise ValueError("Invalid section heading", line)
        # flush the previous segment before starting a new one
        if section or content:
            yield section, content
        section = line[1:-1].strip()
        content = []

    # wrap up last segment
    yield section, content
+
+
def _mkstemp(*args, **kw):
    """tempfile.mkstemp variant that bypasses the setuptools sandbox."""
    old_open = os.open
    try:
        # temporarily bypass sandboxing
        # (os_open is presumably the original os.open captured at import
        # time, before the sandbox could patch it -- defined elsewhere)
        os.open = os_open
        return tempfile.mkstemp(*args, **kw)
    finally:
        # and then put it back
        os.open = old_open
+
+
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
# (Consumers who want to see legacy-version warnings can re-enable the
# category with warnings.simplefilter/filterwarnings.)
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
+
+
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
    """Invoke ``f`` immediately with the given arguments, then return it.

    Used below as a decorator to run module-initialization functions at
    definition time while keeping them importable by name.
    """
    f(*args, **kwargs)
    return f
+
+
@_call_aside
def _initialize(g=globals()):
    "Set up global resource manager (deliberately not state-saved)"
    manager = ResourceManager()
    g['_manager'] = manager
    # re-export every public ResourceManager method as a module-level
    # function (resource_filename, resource_stream, ...)
    g.update(
        (name, getattr(manager, name))
        for name in dir(manager)
        if not name.startswith('_')
    )
+
+
@_call_aside
def _initialize_master_working_set():
    """
    Prepare the master working set and make the ``require()``
    API available.

    This function has explicit effects on the global state
    of pkg_resources. It is intended to be invoked once at
    the initialization of this module.

    Invocation by other packages is unsupported and done
    at their own risk.
    """
    working_set = WorkingSet._build_master()
    _declare_state('object', working_set=working_set)

    require = working_set.require
    iter_entry_points = working_set.iter_entry_points
    add_activation_listener = working_set.subscribe
    run_script = working_set.run_script
    # backward compatibility
    run_main = run_script
    # Activate all distributions already on sys.path with replace=False and
    # ensure that all distributions added to the working set in the future
    # (e.g. by calling ``require()``) will get activated as well,
    # with higher priority (replace=True).
    tuple(
        dist.activate(replace=False)
        for dist in working_set
    )
    add_activation_listener(
        lambda dist: dist.activate(replace=True),
        existing=False,
    )
    working_set.entries = []
    # match order
    list(map(working_set.add_entry, sys.path))
    # publish the locals defined above (require, run_script, ...) as
    # module-level names
    globals().update(locals())
diff --git a/pkg_resources/_vendor/__init__.py b/pkg_resources/_vendor/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg_resources/_vendor/__init__.py
diff --git a/pkg_resources/_vendor/appdirs.py b/pkg_resources/_vendor/appdirs.py
new file mode 100644
index 0000000..f4dba09
--- /dev/null
+++ b/pkg_resources/_vendor/appdirs.py
@@ -0,0 +1,552 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2005-2010 ActiveState Software Inc.
+# Copyright (c) 2013 Eddy Petrișor
+
+"""Utilities for determining application-specific dirs.
+
+See <http://github.com/ActiveState/appdirs> for details and usage.
+"""
+# Dev Notes:
+# - MSDN on where to store app data files:
+#   http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
+# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
+# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+
+__version_info__ = (1, 4, 0)
+__version__ = '.'.join(map(str, __version_info__))
+
+
+import sys
+import os
+
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    # Py2-style alias so code below (e.g. the pywin32 helper) can call
+    # ``unicode(...)`` on both Python versions.
+    unicode = str
+
+# Normalize ``system`` to a sys.platform-style string. On Jython,
+# sys.platform starts with 'java', so inspect the underlying OS instead.
+if sys.platform.startswith('java'):
+    import platform
+    os_name = platform.java_ver()[3][0]
+    if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
+        system = 'win32'
+    elif os_name.startswith('Mac'): # "Mac OS X", etc.
+        system = 'darwin'
+    else: # "Linux", "SunOS", "FreeBSD", etc.
+        # Setting this to "linux2" is not ideal, but only Windows or Mac
+        # are actually checked for and the rest of the module expects
+        # *sys.platform* style strings.
+        system = 'linux2'
+else:
+    system = sys.platform
+
+
+
+def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
+    r"""Return full path to the user-specific data dir for this application.
+
+        "appname" is the name of application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "roaming" (boolean, default False) can be set True to use the Windows
+            roaming appdata directory. That means that for users on a Windows
+            network setup for roaming profiles, this user data will be
+            sync'd on login. See
+            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+            for a discussion of issues.
+
+    Typical user data directories are:
+        Mac OS X:               ~/Library/Application Support/<AppName>
+        Unix:                   ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
+        Win XP (not roaming):   C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
+        Win XP (roaming):       C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
+        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
+        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
+
+    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
+    That means, by default "~/.local/share/<AppName>".
+    """
+    if system == "win32":
+        if appauthor is None:
+            appauthor = appname
+        const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
+        path = os.path.normpath(_get_win_folder(const))
+        if appname:
+            if appauthor is not False:
+                path = os.path.join(path, appauthor, appname)
+            else:
+                path = os.path.join(path, appname)
+    elif system == 'darwin':
+        path = os.path.expanduser('~/Library/Application Support/')
+        if appname:
+            path = os.path.join(path, appname)
+    else:
+        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
+        if appname:
+            path = os.path.join(path, appname)
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
+    """Return full path to the user-shared data dir for this application.
+
+        "appname" is the name of application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "multipath" is an optional parameter only applicable to *nix
+            which indicates that the entire list of data dirs should be
+            returned. By default, the first item from XDG_DATA_DIRS is
+            returned, or '/usr/local/share/<AppName>',
+            if XDG_DATA_DIRS is not set
+
+    Typical user data directories are:
+        Mac OS X:   /Library/Application Support/<AppName>
+        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
+        Win XP:     C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
+        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.
+
+    For Unix, this is using the $XDG_DATA_DIRS[0] default.
+
+    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+    """
+    if system == "win32":
+        if appauthor is None:
+            appauthor = appname
+        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
+        if appname:
+            if appauthor is not False:
+                path = os.path.join(path, appauthor, appname)
+            else:
+                path = os.path.join(path, appname)
+    elif system == 'darwin':
+        path = os.path.expanduser('/Library/Application Support')
+        if appname:
+            path = os.path.join(path, appname)
+    else:
+        # XDG default for $XDG_DATA_DIRS
+        # only first, if multipath is False
+        path = os.getenv('XDG_DATA_DIRS',
+                         os.pathsep.join(['/usr/local/share', '/usr/share']))
+        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
+        if appname:
+            if version:
+                # version is folded into appname here, so each candidate dir
+                # gets the <appname>/<version> suffix.
+                appname = os.path.join(appname, version)
+            pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+        if multipath:
+            path = os.pathsep.join(pathlist)
+        else:
+            path = pathlist[0]
+        # NOTE: the *nix branch returns here, skipping the version join
+        # below (version was already applied above).
+        return path
+
+    # win32/darwin only: append the version segment under the app dir.
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
+    r"""Return full path to the user-specific config dir for this application.
+
+        "appname" is the name of application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "roaming" (boolean, default False) can be set True to use the Windows
+            roaming appdata directory. That means that for users on a Windows
+            network setup for roaming profiles, this user data will be
+            sync'd on login. See
+            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+            for a discussion of issues.
+
+    Typical user data directories are:
+        Mac OS X:               same as user_data_dir
+        Unix:                   ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
+        Win *:                  same as user_data_dir
+
+    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
+    That means, by default "~/.config/<AppName>".
+    """
+    # Windows and macOS have no separate config location; delegate to
+    # user_data_dir (version is applied below, so pass None here).
+    if system in ["win32", "darwin"]:
+        path = user_data_dir(appname, appauthor, None, roaming)
+    else:
+        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
+        if appname:
+            path = os.path.join(path, appname)
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
+    """Return full path to the user-shared data dir for this application.
+
+        "appname" is the name of application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "multipath" is an optional parameter only applicable to *nix
+            which indicates that the entire list of config dirs should be
+            returned. By default, the first item from XDG_CONFIG_DIRS is
+            returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
+
+    Typical user data directories are:
+        Mac OS X:   same as site_data_dir
+        Unix:       /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
+                    $XDG_CONFIG_DIRS
+        Win *:      same as site_data_dir
+        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+
+    For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
+
+    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+    """
+    # Windows and macOS share the site data location; version is appended
+    # here because site_data_dir is called without it.
+    if system in ["win32", "darwin"]:
+        path = site_data_dir(appname, appauthor)
+        if appname and version:
+            path = os.path.join(path, version)
+    else:
+        # XDG default for $XDG_CONFIG_DIRS
+        # only first, if multipath is False
+        path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
+        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
+        if appname:
+            if version:
+                # Fold version into appname so every candidate dir gets
+                # the <appname>/<version> suffix.
+                appname = os.path.join(appname, version)
+            pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+        if multipath:
+            path = os.pathsep.join(pathlist)
+        else:
+            path = pathlist[0]
+    return path
+
+
+def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
+    r"""Return full path to the user-specific cache dir for this application.
+
+        "appname" is the name of application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "opinion" (boolean) can be False to disable the appending of
+            "Cache" to the base app data dir for Windows. See
+            discussion below.
+
+    Typical user cache directories are:
+        Mac OS X:   ~/Library/Caches/<AppName>
+        Unix:       ~/.cache/<AppName> (XDG default)
+        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
+        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
+
+    On Windows the only suggestion in the MSDN docs is that local settings go in
+    the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
+    app data dir (the default returned by `user_data_dir` above). Apps typically
+    put cache data somewhere *under* the given dir here. Some examples:
+        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
+        ...\Acme\SuperApp\Cache\1.0
+    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
+    This can be disabled with the `opinion=False` option.
+    """
+    if system == "win32":
+        if appauthor is None:
+            appauthor = appname
+        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
+        if appname:
+            if appauthor is not False:
+                path = os.path.join(path, appauthor, appname)
+            else:
+                path = os.path.join(path, appname)
+            # "Cache" suffix is only added when an appname was given.
+            if opinion:
+                path = os.path.join(path, "Cache")
+    elif system == 'darwin':
+        path = os.path.expanduser('~/Library/Caches')
+        if appname:
+            path = os.path.join(path, appname)
+    else:
+        path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
+        if appname:
+            path = os.path.join(path, appname)
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
+    r"""Return full path to the user-specific log dir for this application.
+
+        "appname" is the name of application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "opinion" (boolean) can be False to disable the appending of
+            "Logs" to the base app data dir for Windows, and "log" to the
+            base cache dir for Unix. See discussion below.
+
+    Typical user cache directories are:
+        Mac OS X:   ~/Library/Logs/<AppName>
+        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
+        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
+        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
+
+    On Windows the only suggestion in the MSDN docs is that local settings
+    go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
+    examples of what some windows apps use for a logs dir.)
+
+    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
+    value for Windows and appends "log" to the user cache dir for Unix.
+    This can be disabled with the `opinion=False` option.
+    """
+    if system == "darwin":
+        # NOTE(review): unlike the other branches, appname is joined
+        # unconditionally here — looks like appname=None would raise
+        # in os.path.join; confirm against upstream appdirs.
+        path = os.path.join(
+            os.path.expanduser('~/Library/Logs'),
+            appname)
+    elif system == "win32":
+        path = user_data_dir(appname, appauthor, version)
+        # version was already applied by user_data_dir; suppress the
+        # second join below.
+        version = False
+        if opinion:
+            path = os.path.join(path, "Logs")
+    else:
+        path = user_cache_dir(appname, appauthor, version)
+        # version was already applied by user_cache_dir.
+        version = False
+        if opinion:
+            path = os.path.join(path, "log")
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+class AppDirs(object):
+    """Convenience wrapper for getting application dirs.
+
+    Stores appname/appauthor/version/roaming/multipath once and exposes
+    each module-level ``*_dir`` function as a read-only property.
+    """
+    def __init__(self, appname, appauthor=None, version=None, roaming=False,
+                 multipath=False):
+        self.appname = appname
+        self.appauthor = appauthor
+        self.version = version
+        self.roaming = roaming
+        self.multipath = multipath
+
+    @property
+    def user_data_dir(self):
+        return user_data_dir(self.appname, self.appauthor,
+                             version=self.version, roaming=self.roaming)
+
+    @property
+    def site_data_dir(self):
+        return site_data_dir(self.appname, self.appauthor,
+                             version=self.version, multipath=self.multipath)
+
+    @property
+    def user_config_dir(self):
+        return user_config_dir(self.appname, self.appauthor,
+                               version=self.version, roaming=self.roaming)
+
+    @property
+    def site_config_dir(self):
+        return site_config_dir(self.appname, self.appauthor,
+                             version=self.version, multipath=self.multipath)
+
+    @property
+    def user_cache_dir(self):
+        return user_cache_dir(self.appname, self.appauthor,
+                              version=self.version)
+
+    @property
+    def user_log_dir(self):
+        return user_log_dir(self.appname, self.appauthor,
+                            version=self.version)
+
+
+#---- internal support stuff
+
+def _get_win_folder_from_registry(csidl_name):
+    """This is a fallback technique at best. I'm not sure if using the
+    registry for this guarantees us the correct answer for all CSIDL_*
+    names.
+    """
+    # ``_winreg`` is the Python 2 module name (``winreg`` on Python 3);
+    # this fallback is therefore only importable on Python 2.
+    import _winreg
+
+    # Map CSIDL constant names to their "Shell Folders" registry values.
+    # Raises KeyError for unsupported constants.
+    shell_folder_name = {
+        "CSIDL_APPDATA": "AppData",
+        "CSIDL_COMMON_APPDATA": "Common AppData",
+        "CSIDL_LOCAL_APPDATA": "Local AppData",
+    }[csidl_name]
+
+    key = _winreg.OpenKey(
+        _winreg.HKEY_CURRENT_USER,
+        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
+    )
+    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
+    return dir
+
+
+def _get_win_folder_with_pywin32(csidl_name):
+    """Resolve a CSIDL folder via the pywin32 shell bindings."""
+    from win32com.shell import shellcon, shell
+    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
+    # Try to make this a unicode path because SHGetFolderPath does
+    # not return unicode strings when there is unicode data in the
+    # path.
+    try:
+        dir = unicode(dir)
+
+        # Downgrade to short path name if have highbit chars. See
+        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+        has_high_char = False
+        for c in dir:
+            if ord(c) > 255:
+                has_high_char = True
+                break
+        if has_high_char:
+            try:
+                import win32api
+                dir = win32api.GetShortPathName(dir)
+            except ImportError:
+                # win32api unavailable: keep the long (possibly high-bit) path.
+                pass
+    except UnicodeError:
+        pass
+    return dir
+
+
+def _get_win_folder_with_ctypes(csidl_name):
+    """Resolve a CSIDL folder by calling SHGetFolderPathW via ctypes."""
+    import ctypes
+
+    # Numeric CSIDL values for the supported folder constants;
+    # raises KeyError for anything else.
+    csidl_const = {
+        "CSIDL_APPDATA": 26,
+        "CSIDL_COMMON_APPDATA": 35,
+        "CSIDL_LOCAL_APPDATA": 28,
+    }[csidl_name]
+
+    buf = ctypes.create_unicode_buffer(1024)
+    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
+
+    # Downgrade to short path name if have highbit chars. See
+    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+    has_high_char = False
+    for c in buf:
+        if ord(c) > 255:
+            has_high_char = True
+            break
+    if has_high_char:
+        buf2 = ctypes.create_unicode_buffer(1024)
+        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
+            buf = buf2
+
+    return buf.value
+
+def _get_win_folder_with_jna(csidl_name):
+    import array
+    from com.sun import jna
+    from com.sun.jna.platform import win32
+
+    buf_size = win32.WinDef.MAX_PATH * 2
+    buf = array.zeros('c', buf_size)
+    shell = win32.Shell32.INSTANCE
+    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
+    dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+    # Downgrade to short path name if have highbit chars. See
+    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+    has_high_char = False
+    for c in dir:
+        if ord(c) > 255:
+            has_high_char = True
+            break
+    if has_high_char:
+        buf = array.zeros('c', buf_size)
+        kernel = win32.Kernel32.INSTANCE
+        if kernal.GetShortPathName(dir, buf, buf_size):
+            dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+    return dir
+
+if system == "win32":
+    # Pick the best available backend for resolving CSIDL folders,
+    # in preference order: pywin32 > ctypes > JNA (Jython) > registry.
+    try:
+        import win32com.shell
+        _get_win_folder = _get_win_folder_with_pywin32
+    except ImportError:
+        try:
+            from ctypes import windll
+            _get_win_folder = _get_win_folder_with_ctypes
+        except ImportError:
+            try:
+                import com.sun.jna
+                _get_win_folder = _get_win_folder_with_jna
+            except ImportError:
+                _get_win_folder = _get_win_folder_from_registry
+
+
+#---- self test code
+
+if __name__ == "__main__":
+    # Manual smoke test: print every directory property for four
+    # combinations of appname/appauthor/version.
+    appname = "MyApp"
+    appauthor = "MyCompany"
+
+    props = ("user_data_dir", "site_data_dir",
+             "user_config_dir", "site_config_dir",
+             "user_cache_dir", "user_log_dir")
+
+    print("-- app dirs (with optional 'version')")
+    dirs = AppDirs(appname, appauthor, version="1.0")
+    for prop in props:
+        print("%s: %s" % (prop, getattr(dirs, prop)))
+
+    print("\n-- app dirs (without optional 'version')")
+    dirs = AppDirs(appname, appauthor)
+    for prop in props:
+        print("%s: %s" % (prop, getattr(dirs, prop)))
+
+    print("\n-- app dirs (without optional 'appauthor')")
+    dirs = AppDirs(appname)
+    for prop in props:
+        print("%s: %s" % (prop, getattr(dirs, prop)))
+
+    print("\n-- app dirs (with disabled 'appauthor')")
+    dirs = AppDirs(appname, appauthor=False)
+    for prop in props:
+        print("%s: %s" % (prop, getattr(dirs, prop)))
diff --git a/pkg_resources/_vendor/packaging/__about__.py b/pkg_resources/_vendor/packaging/__about__.py
new file mode 100644
index 0000000..95d330e
--- /dev/null
+++ b/pkg_resources/_vendor/packaging/__about__.py
@@ -0,0 +1,21 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+# Package metadata for the vendored ``packaging`` distribution.
+__all__ = [
+    "__title__", "__summary__", "__uri__", "__version__", "__author__",
+    "__email__", "__license__", "__copyright__",
+]
+
+__title__ = "packaging"
+__summary__ = "Core utilities for Python packages"
+__uri__ = "https://github.com/pypa/packaging"
+
+__version__ = "16.8"
+
+__author__ = "Donald Stufft and individual contributors"
+__email__ = "donald@stufft.io"
+
+__license__ = "BSD or Apache License, Version 2.0"
+__copyright__ = "Copyright 2014-2016 %s" % __author__
diff --git a/pkg_resources/_vendor/packaging/__init__.py b/pkg_resources/_vendor/packaging/__init__.py
new file mode 100644
index 0000000..5ee6220
--- /dev/null
+++ b/pkg_resources/_vendor/packaging/__init__.py
@@ -0,0 +1,14 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+from .__about__ import (
+    __author__, __copyright__, __email__, __license__, __summary__, __title__,
+    __uri__, __version__
+)
+
+__all__ = [
+    "__title__", "__summary__", "__uri__", "__version__", "__author__",
+    "__email__", "__license__", "__copyright__",
+]
diff --git a/pkg_resources/_vendor/packaging/_compat.py b/pkg_resources/_vendor/packaging/_compat.py
new file mode 100644
index 0000000..210bb80
--- /dev/null
+++ b/pkg_resources/_vendor/packaging/_compat.py
@@ -0,0 +1,30 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+# flake8: noqa
+
+# ``string_types`` is a tuple usable with isinstance() on both
+# Python 2 (basestring) and Python 3 (str).
+if PY3:
+    string_types = str,
+else:
+    string_types = basestring,
+
+
+def with_metaclass(meta, *bases):
+    """
+    Create a base class with a metaclass.
+    """
+    # This requires a bit of explanation: the basic idea is to make a dummy
+    # metaclass for one level of class instantiation that replaces itself with
+    # the actual metaclass.
+    class metaclass(meta):
+        def __new__(cls, name, this_bases, d):
+            return meta(name, bases, d)
+    return type.__new__(metaclass, 'temporary_class', (), {})
diff --git a/pkg_resources/_vendor/packaging/_structures.py b/pkg_resources/_vendor/packaging/_structures.py
new file mode 100644
index 0000000..ccc2786
--- /dev/null
+++ b/pkg_resources/_vendor/packaging/_structures.py
@@ -0,0 +1,68 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+
+class Infinity(object):
+    """Sentinel that compares greater than every other value.
+
+    Used as a padding element when comparing version tuples.
+    """
+
+    def __repr__(self):
+        return "Infinity"
+
+    def __hash__(self):
+        return hash(repr(self))
+
+    def __lt__(self, other):
+        return False
+
+    def __le__(self, other):
+        return False
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other):
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other):
+        return True
+
+    def __ge__(self, other):
+        return True
+
+    def __neg__(self):
+        return NegativeInfinity
+
+# Replace the class with its singleton instance; the class is not
+# reachable by name after this point.
+Infinity = Infinity()
+
+
+class NegativeInfinity(object):
+    """Sentinel that compares less than every other value.
+
+    Mirror image of ``Infinity``; ``-Infinity`` returns this singleton.
+    """
+
+    def __repr__(self):
+        return "-Infinity"
+
+    def __hash__(self):
+        return hash(repr(self))
+
+    def __lt__(self, other):
+        return True
+
+    def __le__(self, other):
+        return True
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other):
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other):
+        return False
+
+    def __ge__(self, other):
+        return False
+
+    def __neg__(self):
+        return Infinity
+
+# Replace the class with its singleton instance, as with Infinity above.
+NegativeInfinity = NegativeInfinity()
diff --git a/pkg_resources/_vendor/packaging/markers.py b/pkg_resources/_vendor/packaging/markers.py
new file mode 100644
index 0000000..892e578
--- /dev/null
+++ b/pkg_resources/_vendor/packaging/markers.py
@@ -0,0 +1,301 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import operator
+import os
+import platform
+import sys
+
+from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
+from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
+from pkg_resources.extern.pyparsing import Literal as L  # noqa
+
+from ._compat import string_types
+from .specifiers import Specifier, InvalidSpecifier
+
+
+__all__ = [
+    "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
+    "Marker", "default_environment",
+]
+
+
+class InvalidMarker(ValueError):
+    """
+    An invalid marker was found, users should refer to PEP 508.
+    """
+
+
+class UndefinedComparison(ValueError):
+    """
+    An invalid operation was attempted on a value that doesn't support it.
+    """
+
+
+class UndefinedEnvironmentName(ValueError):
+    """
+    A name was attempted to be used that does not exist inside of the
+    environment.
+    """
+
+
+class Node(object):
+    """Base class for parsed marker atoms (variables, values, operators)."""
+
+    def __init__(self, value):
+        self.value = value
+
+    def __str__(self):
+        return str(self.value)
+
+    def __repr__(self):
+        return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
+
+    def serialize(self):
+        # Subclasses define how the atom is rendered back into marker text.
+        raise NotImplementedError
+
+
+class Variable(Node):
+    # Environment-variable atom (e.g. python_version); serialized bare.
+
+    def serialize(self):
+        return str(self)
+
+
+class Value(Node):
+    # Literal string atom; serialized with surrounding double quotes.
+
+    def serialize(self):
+        return '"{0}"'.format(self)
+
+
+class Op(Node):
+    # Comparison/membership operator atom; serialized bare.
+
+    def serialize(self):
+        return str(self)
+
+
+# pyparsing grammar for PEP 508 environment markers.
+# NOTE(review): alternation in pyparsing matches the first alternative, so
+# longer names appear before their prefixes (e.g. implementation_version
+# before implementation_name, python_full_version before python_version).
+VARIABLE = (
+    L("implementation_version") |
+    L("platform_python_implementation") |
+    L("implementation_name") |
+    L("python_full_version") |
+    L("platform_release") |
+    L("platform_version") |
+    L("platform_machine") |
+    L("platform_system") |
+    L("python_version") |
+    L("sys_platform") |
+    L("os_name") |
+    L("os.name") |  # PEP-345
+    L("sys.platform") |  # PEP-345
+    L("platform.version") |  # PEP-345
+    L("platform.machine") |  # PEP-345
+    L("platform.python_implementation") |  # PEP-345
+    L("python_implementation") |  # undocumented setuptools legacy
+    L("extra")
+)
+# Map legacy PEP-345 dotted names onto the PEP 508 underscore names.
+ALIASES = {
+    'os.name': 'os_name',
+    'sys.platform': 'sys_platform',
+    'platform.version': 'platform_version',
+    'platform.machine': 'platform_machine',
+    'platform.python_implementation': 'platform_python_implementation',
+    'python_implementation': 'platform_python_implementation'
+}
+VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
+
+VERSION_CMP = (
+    L("===") |
+    L("==") |
+    L(">=") |
+    L("<=") |
+    L("!=") |
+    L("~=") |
+    L(">") |
+    L("<")
+)
+
+MARKER_OP = VERSION_CMP | L("not in") | L("in")
+MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
+
+MARKER_VALUE = QuotedString("'") | QuotedString('"')
+MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
+
+BOOLOP = L("and") | L("or")
+
+MARKER_VAR = VARIABLE | MARKER_VALUE
+
+# A single comparison, parsed into a (lhs, op, rhs) tuple.
+MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
+MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
+
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+
+# Recursive expression: items joined by and/or, with optional parens.
+MARKER_EXPR = Forward()
+MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
+MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
+
+# Top-level rule: the expression must consume the entire string.
+MARKER = stringStart + MARKER_EXPR + stringEnd
+
+
+def _coerce_parse_result(results):
+    # Recursively convert pyparsing ParseResults into plain nested lists,
+    # leaving leaf nodes (tuples, Node instances, strings) untouched.
+    if isinstance(results, ParseResults):
+        return [_coerce_parse_result(i) for i in results]
+    else:
+        return results
+
+
+def _format_marker(marker, first=True):
+    """Render a parsed marker (nested lists/tuples/strings) back to text.
+
+    ``first`` is True only at the top level, where no surrounding
+    parentheses are emitted.
+    """
+    assert isinstance(marker, (list, tuple, string_types))
+
+    # Sometimes we have a structure like [[...]] which is a single item list
+    # where the single item is itself it's own list. In that case we want skip
+    # the rest of this function so that we don't get extraneous () on the
+    # outside.
+    if (isinstance(marker, list) and len(marker) == 1 and
+            isinstance(marker[0], (list, tuple))):
+        return _format_marker(marker[0])
+
+    if isinstance(marker, list):
+        inner = (_format_marker(m, first=False) for m in marker)
+        if first:
+            return " ".join(inner)
+        else:
+            return "(" + " ".join(inner) + ")"
+    elif isinstance(marker, tuple):
+        # A single comparison: serialize each Node (lhs, op, rhs).
+        return " ".join([m.serialize() for m in marker])
+    else:
+        # A bare "and"/"or" connective string.
+        return marker
+
+
# Fallback comparison semantics used by _eval_op when the right-hand side
# cannot be interpreted as a PEP 440 specifier: maps marker operator text
# to a two-argument predicate.
_operators = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
+
+
def _eval_op(lhs, op, rhs):
    """Evaluate a single marker comparison ``lhs <op> rhs``.

    Prefers PEP 440 specifier semantics when ``op`` plus ``rhs`` form a
    valid version specifier; otherwise falls back to the plain comparison
    table. Raises UndefinedComparison for operators with no fallback.
    """
    try:
        spec = Specifier(op.serialize() + rhs)
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs)

    try:
        compare = _operators[op.serialize()]
    except KeyError:
        raise UndefinedComparison(
            "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
        )
    return compare(lhs, rhs)
+
+
+_undefined = object()
+
+
+def _get_env(environment, name):
+    value = environment.get(name, _undefined)
+
+    if value is _undefined:
+        raise UndefinedEnvironmentName(
+            "{0!r} does not exist in evaluation environment.".format(name)
+        )
+
+    return value
+
+
def _evaluate_markers(markers, environment):
    """Evaluate a coerced marker structure against *environment*.

    ``markers`` is the nested list/tuple/str structure produced by
    ``_coerce_parse_result``. Returns a boolean.
    """
    # Each inner list collects the ANDed results of consecutive items;
    # an "or" token starts a new group. ORing the groups at the end gives
    # "and" higher precedence than "or".
    groups = [[]]

    for marker in markers:
        assert isinstance(marker, (list, tuple, string_types))

        if isinstance(marker, list):
            # Parenthesized sub-expression: evaluate recursively.
            groups[-1].append(_evaluate_markers(marker, environment))
        elif isinstance(marker, tuple):
            # A single comparison (lhs, op, rhs); exactly one side is a
            # Variable to be resolved from the environment, the other a
            # literal Value.
            lhs, op, rhs = marker

            if isinstance(lhs, Variable):
                lhs_value = _get_env(environment, lhs.value)
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                rhs_value = _get_env(environment, rhs.value)

            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            # Boolean connective; "and" needs no action since the current
            # group already ANDs its members.
            assert marker in ["and", "or"]
            if marker == "or":
                groups.append([])

    return any(all(item) for item in groups)
+
+
def format_full_version(info):
    """Format a ``sys.implementation.version``-style structure as a version
    string, e.g. ``3.6.2`` or ``3.7.0b1`` for non-final release levels."""
    version = '{0.major}.{0.minor}.{0.micro}'.format(info)
    kind = info.releaselevel
    if kind != 'final':
        # Append the first letter of the release level plus the serial,
        # e.g. releaselevel='beta', serial=1 -> 'b1'.
        version += kind[0] + str(info.serial)
    return version


def default_environment():
    """Return the default PEP 508 marker environment describing the running
    interpreter and platform."""
    if hasattr(sys, 'implementation'):
        iver = format_full_version(sys.implementation.version)
        implementation_name = sys.implementation.name
    else:
        # Python 2 has no sys.implementation; use neutral placeholders.
        iver = '0'
        implementation_name = ''

    return {
        "implementation_name": implementation_name,
        "implementation_version": iver,
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        # Join the first two version components instead of slicing the
        # first three characters: platform.python_version()[:3] yields
        # "3.1" on Python 3.10+, which is wrong.
        "python_version": ".".join(platform.python_version_tuple()[:2]),
        "sys_platform": sys.platform,
    }
+
+
class Marker(object):
    """A parsed PEP 508 environment marker, e.g. ``python_version >= "2.7"``."""

    def __init__(self, marker):
        """Parse *marker*; raises InvalidMarker on malformed input."""
        try:
            parsed = MARKER.parseString(marker)
        except ParseException as exc:
            snippet = marker[exc.loc:exc.loc + 8]
            raise InvalidMarker(
                "Invalid marker: {0!r}, parse error at {1!r}".format(
                    marker, snippet))
        self._markers = _coerce_parse_result(parsed)

    def __str__(self):
        return _format_marker(self._markers)

    def __repr__(self):
        return "<Marker({0!r})>".format(str(self))

    def evaluate(self, environment=None):
        """Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        """
        current = default_environment()
        if environment is not None:
            current.update(environment)

        return _evaluate_markers(self._markers, current)
diff --git a/pkg_resources/_vendor/packaging/requirements.py b/pkg_resources/_vendor/packaging/requirements.py
new file mode 100644
index 0000000..0c8c4a3
--- /dev/null
+++ b/pkg_resources/_vendor/packaging/requirements.py
@@ -0,0 +1,127 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import string
+import re
+
+from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
+from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
+from pkg_resources.extern.pyparsing import Literal as L  # noqa
+from pkg_resources.extern.six.moves.urllib import parse as urlparse
+
+from .markers import MARKER_EXPR, Marker
+from .specifiers import LegacySpecifier, Specifier, SpecifierSet
+
+
class InvalidRequirement(ValueError):
    """Raised when a requirement string does not conform to PEP 508."""
+
+
# --- PEP 508 requirement grammar -------------------------------------------

ALPHANUM = Word(string.ascii_letters + string.digits)

# Punctuation tokens; suppress() drops them from parse results.
LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()

# Identifiers (project and extra names) must start and end alphanumeric,
# with -, _ and . allowed in between.
PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))

NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER

# Direct-reference URL: everything after "@" up to whitespace.
URI = Regex(r'[^ ]+')("url")
URL = (AT + URI)

# Comma-separated extras inside square brackets, e.g. "[security,tests]".
EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")

# Version clauses reuse the regexes from specifiers.py; the PEP 440 form is
# tried alongside the legacy fallback (longest-match via ^).
VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)

VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
                       joinString=",", adjacent=False)("_raw_spec")
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')

# Preserve the specifier's original text (minus any enclosing parens).
VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])

# Rebind the imported MARKER_EXPR Forward so the marker's original source
# text is captured and parsed into a Marker object.
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
    lambda s, l, t: Marker(s[t._original_start:t._original_end])
)
MARKER_SEPERATOR = SEMICOLON
MARKER = MARKER_SEPERATOR + MARKER_EXPR

# A requirement carries either a URL or a version spec, optionally followed
# by "; <marker>".
VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)

NAMED_REQUIREMENT = \
    NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)

# Top-level entry point: the requirement must span the whole input string.
REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
+
+
class Requirement(object):
    """Parse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    """

    # TODO: Can we test whether something is contained within a requirement?
    #       If so how do we do that? Do we need to test against the _name_ of
    #       the thing as well as the version? What about the markers?
    # TODO: Can we normalize the name and extra name?

    def __init__(self, requirement_string):
        try:
            req = REQUIREMENT.parseString(requirement_string)
        except ParseException as e:
            raise InvalidRequirement(
                "Invalid requirement, parse error at \"{0!r}\"".format(
                    requirement_string[e.loc:e.loc + 8]))

        self.name = req.name
        if req.url:
            parsed_url = urlparse.urlparse(req.url)
            # NOTE(review): the second disjunct can never change the
            # outcome — if both scheme and netloc are missing, the first
            # clause is already true. As written this also rejects
            # netloc-less URLs such as file:///path; confirm whether that
            # is intended before simplifying.
            if not (parsed_url.scheme and parsed_url.netloc) or (
                    not parsed_url.scheme and not parsed_url.netloc):
                raise InvalidRequirement("Invalid URL given")
            self.url = req.url
        else:
            self.url = None
        # req.extras is a pyparsing ParseResults; normalize to a plain set.
        self.extras = set(req.extras.asList() if req.extras else [])
        self.specifier = SpecifierSet(req.specifier)
        self.marker = req.marker if req.marker else None

    def __str__(self):
        # Reassemble the canonical PEP 508 string from the parsed parts.
        parts = [self.name]

        if self.extras:
            parts.append("[{0}]".format(",".join(sorted(self.extras))))

        if self.specifier:
            parts.append(str(self.specifier))

        if self.url:
            parts.append("@ {0}".format(self.url))

        if self.marker:
            parts.append("; {0}".format(self.marker))

        return "".join(parts)

    def __repr__(self):
        return "<Requirement({0!r})>".format(str(self))
diff --git a/pkg_resources/_vendor/packaging/specifiers.py b/pkg_resources/_vendor/packaging/specifiers.py
new file mode 100644
index 0000000..7f5a76c
--- /dev/null
+++ b/pkg_resources/_vendor/packaging/specifiers.py
@@ -0,0 +1,774 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import abc
+import functools
+import itertools
+import re
+
+from ._compat import string_types, with_metaclass
+from .version import Version, LegacyVersion, parse
+
+
class InvalidSpecifier(ValueError):
    """Raised when a version specifier string does not conform to PEP 440."""
+
+
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
    """Abstract interface implemented by individual specifiers and
    SpecifierSet."""

    @abc.abstractmethod
    def __str__(self):
        """
        Returns the str representation of this Specifier like object. This
        should be representative of the Specifier itself.
        """

    @abc.abstractmethod
    def __hash__(self):
        """
        Returns a hash value for this Specifier like object.
        """

    @abc.abstractmethod
    def __eq__(self, other):
        """
        Returns a boolean representing whether or not the two Specifier like
        objects are equal.
        """

    @abc.abstractmethod
    def __ne__(self, other):
        """
        Returns a boolean representing whether or not the two Specifier like
        objects are not equal.
        """

    @abc.abstractproperty
    def prereleases(self):
        """
        Returns whether or not pre-releases as a whole are allowed by this
        specifier.
        """

    # Setter half of the abstract ``prereleases`` property above.
    @prereleases.setter
    def prereleases(self, value):
        """
        Sets whether or not pre-releases as a whole are allowed by this
        specifier.
        """

    @abc.abstractmethod
    def contains(self, item, prereleases=None):
        """
        Determines if the given item is contained within this specifier.
        """

    @abc.abstractmethod
    def filter(self, iterable, prereleases=None):
        """
        Takes an iterable of items and filters them so that only items which
        are contained within this specifier are allowed in it.
        """
+
+
class _IndividualSpecifier(BaseSpecifier):
    """Shared implementation for single-clause specifiers (one operator plus
    one version), used by both Specifier and LegacySpecifier."""

    # Maps operator text to the ``_compare_*`` method-name suffix; filled in
    # by subclasses.
    _operators = {}

    def __init__(self, spec="", prereleases=None):
        # ``_regex`` is supplied by the subclass and captures the "operator"
        # and "version" groups.
        match = self._regex.search(spec)
        if not match:
            raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))

        self._spec = (
            match.group("operator").strip(),
            match.group("version").strip(),
        )

        # Store whether or not this Specifier should accept prereleases
        self._prereleases = prereleases

    def __repr__(self):
        pre = (
            ", prereleases={0!r}".format(self.prereleases)
            if self._prereleases is not None
            else ""
        )

        return "<{0}({1!r}{2})>".format(
            self.__class__.__name__,
            str(self),
            pre,
        )

    def __str__(self):
        return "{0}{1}".format(*self._spec)

    def __hash__(self):
        return hash(self._spec)

    def __eq__(self, other):
        # Strings are parsed as specifiers of the same class before comparing.
        if isinstance(other, string_types):
            try:
                other = self.__class__(other)
            except InvalidSpecifier:
                return NotImplemented
        elif not isinstance(other, self.__class__):
            return NotImplemented

        return self._spec == other._spec

    def __ne__(self, other):
        if isinstance(other, string_types):
            try:
                other = self.__class__(other)
            except InvalidSpecifier:
                return NotImplemented
        elif not isinstance(other, self.__class__):
            return NotImplemented

        return self._spec != other._spec

    def _get_operator(self, op):
        # Dispatch an operator string to the matching ``_compare_*`` method.
        return getattr(self, "_compare_{0}".format(self._operators[op]))

    def _coerce_version(self, version):
        # Accept strings as well as already-parsed version objects.
        if not isinstance(version, (LegacyVersion, Version)):
            version = parse(version)
        return version

    @property
    def operator(self):
        return self._spec[0]

    @property
    def version(self):
        return self._spec[1]

    @property
    def prereleases(self):
        return self._prereleases

    @prereleases.setter
    def prereleases(self, value):
        self._prereleases = value

    def __contains__(self, item):
        return self.contains(item)

    def contains(self, item, prereleases=None):
        # Determine if prereleases are to be allowed or not.
        if prereleases is None:
            prereleases = self.prereleases

        # Normalize item to a Version or LegacyVersion, this allows us to have
        # a shortcut for ``"2.0" in Specifier(">=2")
        item = self._coerce_version(item)

        # Determine if we should be supporting prereleases in this specifier
        # or not, if we do not support prereleases than we can short circuit
        # logic if this version is a prereleases.
        if item.is_prerelease and not prereleases:
            return False

        # Actually do the comparison to determine if this item is contained
        # within this Specifier or not.
        return self._get_operator(self.operator)(item, self.version)

    def filter(self, iterable, prereleases=None):
        yielded = False
        found_prereleases = []

        kw = {"prereleases": prereleases if prereleases is not None else True}

        # Attempt to iterate over all the values in the iterable and if any of
        # them match, yield them.
        for version in iterable:
            parsed_version = self._coerce_version(version)

            if self.contains(parsed_version, **kw):
                # If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later incase nothing
                # else matches this specifier.
                if (parsed_version.is_prerelease and not
                        (prereleases or self.prereleases)):
                    found_prereleases.append(version)
                # Either this is not a prerelease, or we should have been
                # accepting prereleases from the begining.
                else:
                    yielded = True
                    yield version

        # Now that we've iterated over everything, determine if we've yielded
        # any values, and if we have not and we have any prereleases stored up
        # then we will go ahead and yield the prereleases.
        if not yielded and found_prereleases:
            for version in found_prereleases:
                yield version
+
+
class LegacySpecifier(_IndividualSpecifier):
    """Single specifier clause using setuptools' pre-PEP 440 ("legacy")
    version semantics; all versions are coerced to LegacyVersion."""

    # NOTE: this raw regex string is also reused by requirements.py to build
    # its version-clause grammar, so keep it a standalone VERBOSE pattern.
    _regex_str = (
        r"""
        (?P<operator>(==|!=|<=|>=|<|>))
        \s*
        (?P<version>
            [^,;\s)]* # Since this is a "legacy" specifier, and the version
                      # string can be just about anything, we match everything
                      # except for whitespace, a semi-colon for marker support,
                      # a closing paren since versions can be enclosed in
                      # them, and a comma since it's a version separator.
        )
        """
    )

    _regex = re.compile(
        r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)

    # Operator text -> _compare_* method-name suffix (see _get_operator).
    _operators = {
        "==": "equal",
        "!=": "not_equal",
        "<=": "less_than_equal",
        ">=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
    }

    def _coerce_version(self, version):
        # Unlike the base class, always force LegacyVersion semantics.
        if not isinstance(version, LegacyVersion):
            version = LegacyVersion(str(version))
        return version

    def _compare_equal(self, prospective, spec):
        return prospective == self._coerce_version(spec)

    def _compare_not_equal(self, prospective, spec):
        return prospective != self._coerce_version(spec)

    def _compare_less_than_equal(self, prospective, spec):
        return prospective <= self._coerce_version(spec)

    def _compare_greater_than_equal(self, prospective, spec):
        return prospective >= self._coerce_version(spec)

    def _compare_less_than(self, prospective, spec):
        return prospective < self._coerce_version(spec)

    def _compare_greater_than(self, prospective, spec):
        return prospective > self._coerce_version(spec)
+
+
def _require_version_compare(fn):
    """Decorator for Specifier comparison methods.

    Non-PEP 440 prospective versions can never satisfy a PEP 440 clause,
    so anything that is not a ``Version`` instance short-circuits to False.
    """
    @functools.wraps(fn)
    def wrapped(self, prospective, spec):
        if isinstance(prospective, Version):
            return fn(self, prospective, spec)
        return False
    return wrapped
+
+
class Specifier(_IndividualSpecifier):
    """A single PEP 440 version specifier clause, e.g. ``~=2.2`` or
    ``==1.0.*``."""

    _regex_str = (
        r"""
        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
        (?P<version>
            (?:
                # The identity operators allow for an escape hatch that will
                # do an exact string match of the version you wish to install.
                # This will not be parsed by PEP 440 and we cannot determine
                # any semantic meaning from it. This operator is discouraged
                # but included entirely as an escape hatch.
                (?<====)  # Only match for the identity operator
                \s*
                [^\s]*    # We just match everything, except for whitespace
                          # since we are only testing for strict identity.
            )
            |
            (?:
                # The (non)equality operators allow for wild card and local
                # versions to be specified so we have to define these two
                # operators separately to enable that.
                (?<===|!=)            # Only match for equals and not equals

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?

                # You cannot use a wild card and a dev or local version
                # together so group them with a | and make them optional.
                (?:
                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
                    |
                    \.\*  # Wild card syntax of .*
                )?
            )
            |
            (?:
                # The compatible operator requires at least two digits in the
                # release segment.
                (?<=~=)               # Only match for the compatible operator

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
            )
            |
            (?:
                # All other operators only allow a sub set of what the
                # (non)equality operators do. Specifically they do not allow
                # local versions to be specified nor do they allow the prefix
                # matching wild cards.
                (?<!==|!=|~=)         # We have special cases for these
                                      # operators so we want to make sure they
                                      # don't match here.

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
            )
        )
        """
    )

    _regex = re.compile(
        r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)

    # Operator text -> _compare_* method-name suffix (see _get_operator).
    _operators = {
        "~=": "compatible",
        "==": "equal",
        "!=": "not_equal",
        "<=": "less_than_equal",
        ">=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
        "===": "arbitrary",
    }

    @_require_version_compare
    def _compare_compatible(self, prospective, spec):
        # Compatible releases have an equivalent combination of >= and ==. That
        # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
        # implement this in terms of the other specifiers instead of
        # implementing it ourselves. The only thing we need to do is construct
        # the other specifiers.

        # We want everything but the last item in the version, but we want to
        # ignore post and dev releases and we want to treat the pre-release as
        # it's own separate segment.
        prefix = ".".join(
            list(
                itertools.takewhile(
                    lambda x: (not x.startswith("post") and not
                               x.startswith("dev")),
                    _version_split(spec),
                )
            )[:-1]
        )

        # Add the prefix notation to the end of our string
        prefix += ".*"

        return (self._get_operator(">=")(prospective, spec) and
                self._get_operator("==")(prospective, prefix))

    @_require_version_compare
    def _compare_equal(self, prospective, spec):
        # We need special logic to handle prefix matching
        if spec.endswith(".*"):
            # In the case of prefix matching we want to ignore local segment.
            prospective = Version(prospective.public)
            # Split the spec out by dots, and pretend that there is an implicit
            # dot in between a release segment and a pre-release segment.
            spec = _version_split(spec[:-2])  # Remove the trailing .*

            # Split the prospective version out by dots, and pretend that there
            # is an implicit dot in between a release segment and a pre-release
            # segment.
            prospective = _version_split(str(prospective))

            # Shorten the prospective version to be the same length as the spec
            # so that we can determine if the specifier is a prefix of the
            # prospective version or not.
            prospective = prospective[:len(spec)]

            # Pad out our two sides with zeros so that they both equal the same
            # length.
            spec, prospective = _pad_version(spec, prospective)
        else:
            # Convert our spec string into a Version
            spec = Version(spec)

            # If the specifier does not have a local segment, then we want to
            # act as if the prospective version also does not have a local
            # segment.
            if not spec.local:
                prospective = Version(prospective.public)

        return prospective == spec

    @_require_version_compare
    def _compare_not_equal(self, prospective, spec):
        return not self._compare_equal(prospective, spec)

    @_require_version_compare
    def _compare_less_than_equal(self, prospective, spec):
        return prospective <= Version(spec)

    @_require_version_compare
    def _compare_greater_than_equal(self, prospective, spec):
        return prospective >= Version(spec)

    @_require_version_compare
    def _compare_less_than(self, prospective, spec):
        # Convert our spec to a Version instance, since we'll want to work with
        # it as a version.
        spec = Version(spec)

        # Check to see if the prospective version is less than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective < spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes is a pre-release version, that we do not accept pre-release
        # versions for the version mentioned in the specifier (e.g. <3.1 should
        # not match 3.1.dev0, but should match 3.0.dev0).
        if not spec.is_prerelease and prospective.is_prerelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten to here, it means that prospective version is both
        # less than the spec version *and* it's not a pre-release of the same
        # version in the spec.
        return True

    @_require_version_compare
    def _compare_greater_than(self, prospective, spec):
        # Convert our spec to a Version instance, since we'll want to work with
        # it as a version.
        spec = Version(spec)

        # Check to see if the prospective version is greater than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective > spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes is a post-release version, that we do not accept
        # post-release versions for the version mentioned in the specifier
        # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
        if not spec.is_postrelease and prospective.is_postrelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # Ensure that we do not allow a local version of the version mentioned
        # in the specifier, which is techincally greater than, to match.
        if prospective.local is not None:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten to here, it means that prospective version is both
        # greater than the spec version *and* it's not a pre-release of the
        # same version in the spec.
        return True

    def _compare_arbitrary(self, prospective, spec):
        # ``===``: exact, case-insensitive string identity; no PEP 440
        # semantics (and hence no @_require_version_compare guard).
        return str(prospective).lower() == str(spec).lower()

    @property
    def prereleases(self):
        # If there is an explicit prereleases set for this, then we'll just
        # blindly use that.
        if self._prereleases is not None:
            return self._prereleases

        # Look at all of our specifiers and determine if they are inclusive
        # operators, and if they are if they are including an explicit
        # prerelease.
        operator, version = self._spec
        if operator in ["==", ">=", "<=", "~=", "==="]:
            # The == specifier can include a trailing .*, if it does we
            # want to remove before parsing.
            if operator == "==" and version.endswith(".*"):
                version = version[:-2]

            # Parse the version, and if it is a pre-release than this
            # specifier allows pre-releases.
            if parse(version).is_prerelease:
                return True

        return False

    @prereleases.setter
    def prereleases(self, value):
        self._prereleases = value
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version):
+    result = []
+    for item in version.split("."):
+        match = _prefix_regex.search(item)
+        if match:
+            result.extend(match.groups())
+        else:
+            result.append(item)
+    return result
+
+
+def _pad_version(left, right):
+    left_split, right_split = [], []
+
+    # Get the release segment of our versions
+    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+    # Get the rest of our versions
+    left_split.append(left[len(left_split[0]):])
+    right_split.append(right[len(right_split[0]):])
+
+    # Insert our padding
+    left_split.insert(
+        1,
+        ["0"] * max(0, len(right_split[0]) - len(left_split[0])),
+    )
+    right_split.insert(
+        1,
+        ["0"] * max(0, len(left_split[0]) - len(right_split[0])),
+    )
+
+    return (
+        list(itertools.chain(*left_split)),
+        list(itertools.chain(*right_split)),
+    )
+
+
+class SpecifierSet(BaseSpecifier):
+
+    def __init__(self, specifiers="", prereleases=None):
+        # Split on , to break each indidivual specifier into it's own item, and
+        # strip each item to remove leading/trailing whitespace.
+        specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+        # Parsed each individual specifier, attempting first to make it a
+        # Specifier and falling back to a LegacySpecifier.
+        parsed = set()
+        for specifier in specifiers:
+            try:
+                parsed.add(Specifier(specifier))
+            except InvalidSpecifier:
+                parsed.add(LegacySpecifier(specifier))
+
+        # Turn our parsed specifiers into a frozen set and save them for later.
+        self._specs = frozenset(parsed)
+
+        # Store our prereleases value so we can use it later to determine if
+        # we accept prereleases or not.
+        self._prereleases = prereleases
+
+    def __repr__(self):
+        pre = (
+            ", prereleases={0!r}".format(self.prereleases)
+            if self._prereleases is not None
+            else ""
+        )
+
+        return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
+
+    def __str__(self):
+        return ",".join(sorted(str(s) for s in self._specs))
+
+    def __hash__(self):
+        return hash(self._specs)
+
+    def __and__(self, other):
+        if isinstance(other, string_types):
+            other = SpecifierSet(other)
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        specifier = SpecifierSet()
+        specifier._specs = frozenset(self._specs | other._specs)
+
+        if self._prereleases is None and other._prereleases is not None:
+            specifier._prereleases = other._prereleases
+        elif self._prereleases is not None and other._prereleases is None:
+            specifier._prereleases = self._prereleases
+        elif self._prereleases == other._prereleases:
+            specifier._prereleases = self._prereleases
+        else:
+            raise ValueError(
+                "Cannot combine SpecifierSets with True and False prerelease "
+                "overrides."
+            )
+
+        return specifier
+
+    def __eq__(self, other):
+        if isinstance(other, string_types):
+            other = SpecifierSet(other)
+        elif isinstance(other, _IndividualSpecifier):
+            other = SpecifierSet(str(other))
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        return self._specs == other._specs
+
+    def __ne__(self, other):
+        if isinstance(other, string_types):
+            other = SpecifierSet(other)
+        elif isinstance(other, _IndividualSpecifier):
+            other = SpecifierSet(str(other))
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        return self._specs != other._specs
+
+    def __len__(self):
+        return len(self._specs)
+
+    def __iter__(self):
+        return iter(self._specs)
+
+    @property
+    def prereleases(self):
+        # If we have been given an explicit prerelease modifier, then we'll
+        # pass that through here.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # If we don't have any specifiers, and we don't have a forced value,
+        # then we'll just return None since we don't know if this should have
+        # pre-releases or not.
+        if not self._specs:
+            return None
+
+        # Otherwise we'll see if any of the given specifiers accept
+        # prereleases, if any of them do we'll return True, otherwise False.
+        return any(s.prereleases for s in self._specs)
+
+    @prereleases.setter
+    def prereleases(self, value):
+        self._prereleases = value
+
+    def __contains__(self, item):
+        return self.contains(item)
+
+    def contains(self, item, prereleases=None):
+        # Ensure that our item is a Version or LegacyVersion instance.
+        if not isinstance(item, (LegacyVersion, Version)):
+            item = parse(item)
+
+        # Determine if we're forcing a prerelease or not, if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # We can determine if we're going to allow pre-releases by looking to
+        # see if any of the underlying items supports them. If none of them do
+        # and this item is a pre-release then we do not allow it and we can
+        # short circuit that here.
+        # Note: This means that 1.0.dev1 would not be contained in something
+        #       like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
+        if not prereleases and item.is_prerelease:
+            return False
+
+        # We simply dispatch to the underlying specs here to make sure that the
+        # given version is contained within all of them.
+        # Note: This use of all() here means that an empty set of specifiers
+        #       will always return True, this is an explicit design decision.
+        return all(
+            s.contains(item, prereleases=prereleases)
+            for s in self._specs
+        )
+
+    def filter(self, iterable, prereleases=None):
+        # Determine if we're forcing a prerelease or not, if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # If we have any specifiers, then we want to wrap our iterable in the
+        # filter method for each one, this will act as a logical AND amongst
+        # each specifier.
+        if self._specs:
+            for spec in self._specs:
+                iterable = spec.filter(iterable, prereleases=bool(prereleases))
+            return iterable
+        # If we do not have any specifiers, then we need to have a rough filter
+        # which will filter out any pre-releases, unless there are no final
+        # releases, and which will filter out LegacyVersion in general.
+        else:
+            filtered = []
+            found_prereleases = []
+
+            for item in iterable:
+                # Ensure that we some kind of Version class for this item.
+                if not isinstance(item, (LegacyVersion, Version)):
+                    parsed_version = parse(item)
+                else:
+                    parsed_version = item
+
+                # Filter out any item which is parsed as a LegacyVersion
+                if isinstance(parsed_version, LegacyVersion):
+                    continue
+
+                # Store any item which is a pre-release for later unless we've
+                # already found a final version or we are accepting prereleases
+                if parsed_version.is_prerelease and not prereleases:
+                    if not filtered:
+                        found_prereleases.append(item)
+                else:
+                    filtered.append(item)
+
+            # If we've found no items except for pre-releases, then we'll go
+            # ahead and use the pre-releases
+            if not filtered and found_prereleases and prereleases is None:
+                return found_prereleases
+
+            return filtered
diff --git a/pkg_resources/_vendor/packaging/utils.py b/pkg_resources/_vendor/packaging/utils.py
new file mode 100644
index 0000000..942387c
--- /dev/null
+++ b/pkg_resources/_vendor/packaging/utils.py
@@ -0,0 +1,14 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import re
+
+
+_canonicalize_regex = re.compile(r"[-_.]+")
+
+
+def canonicalize_name(name):
+    # This is taken from PEP 503.
+    return _canonicalize_regex.sub("-", name).lower()
diff --git a/pkg_resources/_vendor/packaging/version.py b/pkg_resources/_vendor/packaging/version.py
new file mode 100644
index 0000000..83b5ee8
--- /dev/null
+++ b/pkg_resources/_vendor/packaging/version.py
@@ -0,0 +1,393 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import collections
+import itertools
+import re
+
+from ._structures import Infinity
+
+
+__all__ = [
+    "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
+]
+
+
+_Version = collections.namedtuple(
+    "_Version",
+    ["epoch", "release", "dev", "pre", "post", "local"],
+)
+
+
+def parse(version):
+    """
+    Parse the given version string and return either a :class:`Version` object
+    or a :class:`LegacyVersion` object depending on if the given version is
+    a valid PEP 440 version or a legacy version.
+    """
+    try:
+        return Version(version)
+    except InvalidVersion:
+        return LegacyVersion(version)
+
+
+class InvalidVersion(ValueError):
+    """
+    An invalid version was found, users should refer to PEP 440.
+    """
+
+
+class _BaseVersion(object):
+
+    def __hash__(self):
+        return hash(self._key)
+
+    def __lt__(self, other):
+        return self._compare(other, lambda s, o: s < o)
+
+    def __le__(self, other):
+        return self._compare(other, lambda s, o: s <= o)
+
+    def __eq__(self, other):
+        return self._compare(other, lambda s, o: s == o)
+
+    def __ge__(self, other):
+        return self._compare(other, lambda s, o: s >= o)
+
+    def __gt__(self, other):
+        return self._compare(other, lambda s, o: s > o)
+
+    def __ne__(self, other):
+        return self._compare(other, lambda s, o: s != o)
+
+    def _compare(self, other, method):
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+
+    def __init__(self, version):
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+    def __str__(self):
+        return self._version
+
+    def __repr__(self):
+        return "<LegacyVersion({0})>".format(repr(str(self)))
+
+    @property
+    def public(self):
+        return self._version
+
+    @property
+    def base_version(self):
+        return self._version
+
+    @property
+    def local(self):
+        return None
+
+    @property
+    def is_prerelease(self):
+        return False
+
+    @property
+    def is_postrelease(self):
+        return False
+
+
+_legacy_version_component_re = re.compile(
+    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
+)
+
+_legacy_version_replacement_map = {
+    "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+    for part in _legacy_version_component_re.split(s):
+        part = _legacy_version_replacement_map.get(part, part)
+
+        if not part or part == ".":
+            continue
+
+        if part[:1] in "0123456789":
+            # pad for numeric comparison
+            yield part.zfill(8)
+        else:
+            yield "*" + part
+
+    # ensure that alpha/beta/candidate are before final
+    yield "*final"
+
+
+def _legacy_cmpkey(version):
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
+    # greater than or equal to 0. This will effectively put the LegacyVersion,
+    # which uses the defacto standard originally implemented by setuptools,
+    # as before all PEP 440 versions.
+    epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version setuptools prior to
+    # it's adoption of the packaging library.
+    parts = []
+    for part in _parse_version_parts(version.lower()):
+        if part.startswith("*"):
+            # remove "-" before a prerelease tag
+            if part < "*final":
+                while parts and parts[-1] == "*final-":
+                    parts.pop()
+
+            # remove trailing zeros from each series of numeric parts
+            while parts and parts[-1] == "00000000":
+                parts.pop()
+
+        parts.append(part)
+    parts = tuple(parts)
+
+    return epoch, parts
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(
+        r"^\s*" + VERSION_PATTERN + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    def __init__(self, version):
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(
+                match.group("pre_l"),
+                match.group("pre_n"),
+            ),
+            post=_parse_letter_version(
+                match.group("post_l"),
+                match.group("post_n1") or match.group("post_n2"),
+            ),
+            dev=_parse_letter_version(
+                match.group("dev_l"),
+                match.group("dev_n"),
+            ),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return "<Version({0})>".format(repr(str(self)))
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        # Pre-release
+        if self._version.pre is not None:
+            parts.append("".join(str(x) for x in self._version.pre))
+
+        # Post-release
+        if self._version.post is not None:
+            parts.append(".post{0}".format(self._version.post[1]))
+
+        # Development release
+        if self._version.dev is not None:
+            parts.append(".dev{0}".format(self._version.dev[1]))
+
+        # Local version segment
+        if self._version.local is not None:
+            parts.append(
+                "+{0}".format(".".join(str(x) for x in self._version.local))
+            )
+
+        return "".join(parts)
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        return "".join(parts)
+
+    @property
+    def local(self):
+        version_string = str(self)
+        if "+" in version_string:
+            return version_string.split("+", 1)[1]
+
+    @property
+    def is_prerelease(self):
+        return bool(self._version.dev or self._version.pre)
+
+    @property
+    def is_postrelease(self):
+        return bool(self._version.post)
+
+
+def _parse_letter_version(letter, number):
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+
+_local_version_seperators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_seperators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll use a reverse the list, drop all the now
+    # leading zeros until we come to something non zero, then take the rest
+    # re-reverse it back into the correct order and make it a tuple and use
+    # that for our sorting key.
+    release = tuple(
+        reversed(list(
+            itertools.dropwhile(
+                lambda x: x == 0,
+                reversed(release),
+            )
+        ))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple(
+            (i, "") if isinstance(i, int) else (-Infinity, i)
+            for i in local
+        )
+
+    return epoch, release, pre, post, dev, local
diff --git a/pkg_resources/_vendor/pyparsing.py b/pkg_resources/_vendor/pyparsing.py
new file mode 100644
index 0000000..a212243
--- /dev/null
+++ b/pkg_resources/_vendor/pyparsing.py
@@ -0,0 +1,5696 @@
+# module pyparsing.py

+#

+# Copyright (c) 2003-2016  Paul T. McGuire

+#

+# Permission is hereby granted, free of charge, to any person obtaining

+# a copy of this software and associated documentation files (the

+# "Software"), to deal in the Software without restriction, including

+# without limitation the rights to use, copy, modify, merge, publish,

+# distribute, sublicense, and/or sell copies of the Software, and to

+# permit persons to whom the Software is furnished to do so, subject to

+# the following conditions:

+#

+# The above copyright notice and this permission notice shall be

+# included in all copies or substantial portions of the Software.

+#

+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,

+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF

+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.

+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY

+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,

+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE

+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+#

+

+__doc__ = \

+"""

+pyparsing module - Classes and methods to define and execute parsing grammars

+

+The pyparsing module is an alternative approach to creating and executing simple grammars,

+vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you

+don't need to learn a new syntax for defining grammars or matching expressions - the parsing module

+provides a library of classes that you use to construct the grammar directly in Python.

+

+Here is a program to parse "Hello, World!" (or any greeting of the form 

+C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements 

+(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to

+L{Literal} expressions)::

+

+    from pyparsing import Word, alphas

+

+    # define grammar of a greeting

+    greet = Word(alphas) + "," + Word(alphas) + "!"

+

+    hello = "Hello, World!"

+    print (hello, "->", greet.parseString(hello))

+

+The program outputs the following::

+

+    Hello, World! -> ['Hello', ',', 'World', '!']

+

+The Python representation of the grammar is quite readable, owing to the self-explanatory

+class names, and the use of '+', '|' and '^' operators.

+

+The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an

+object with named attributes.

+

+The pyparsing module handles some of the problems that are typically vexing when writing text parsers:

+ - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)

+ - quoted strings

+ - embedded comments

+"""

+

+__version__ = "2.1.10"

+__versionTime__ = "07 Oct 2016 01:31 UTC"

+__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"

+

+import string

+from weakref import ref as wkref

+import copy

+import sys

+import warnings

+import re

+import sre_constants

+import collections

+import pprint

+import traceback

+import types

+from datetime import datetime

+

+try:

+    from _thread import RLock

+except ImportError:

+    from threading import RLock

+

+try:

+    from collections import OrderedDict as _OrderedDict

+except ImportError:

+    try:

+        from ordereddict import OrderedDict as _OrderedDict

+    except ImportError:

+        _OrderedDict = None

+

+#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )

+

+__all__ = [

+'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',

+'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',

+'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',

+'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',

+'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',

+'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 

+'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',

+'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',

+'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',

+'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',

+'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',

+'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',

+'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',

+'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 

+'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',

+'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',

+'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',

+'CloseMatch', 'tokenMap', 'pyparsing_common',

+]

+

+system_version = tuple(sys.version_info)[:3]

+PY_3 = system_version[0] == 3

+if PY_3:

+    _MAX_INT = sys.maxsize

+    basestring = str

+    unichr = chr

+    _ustr = str

+

+    # build list of single arg builtins, that can be used as parse actions

+    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]

+

+else:

+    _MAX_INT = sys.maxint

+    range = xrange

+

+    def _ustr(obj):

+        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries

+           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It

+           then < returns the unicode object | encodes it with the default encoding | ... >.

+        """

+        if isinstance(obj,unicode):

+            return obj

+

+        try:

+            # If this works, then _ustr(obj) has the same behaviour as str(obj), so

+            # it won't break any existing code.

+            return str(obj)

+

+        except UnicodeEncodeError:

+            # Else encode it

+            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')

+            xmlcharref = Regex('&#\d+;')

+            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])

+            return xmlcharref.transformString(ret)

+

+    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions

+    singleArgBuiltins = []

+    import __builtin__

+    for fname in "sum len sorted reversed list tuple set any all min max".split():

+        try:

+            singleArgBuiltins.append(getattr(__builtin__,fname))

+        except AttributeError:

+            continue

+            

+_generatorType = type((y for y in range(1)))

+ 

+def _xml_escape(data):

+    """Escape &, <, >, ", ', etc. in a string of data."""

+

+    # ampersand must be replaced first

+    from_symbols = '&><"\''

+    to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())

+    for from_,to_ in zip(from_symbols, to_symbols):

+        data = data.replace(from_, to_)

+    return data

+

+class _Constants(object):

+    pass

+

+alphas     = string.ascii_uppercase + string.ascii_lowercase

+nums       = "0123456789"

+hexnums    = nums + "ABCDEFabcdef"

+alphanums  = alphas + nums

+_bslash    = chr(92)

+printables = "".join(c for c in string.printable if c not in string.whitespace)

+

+class ParseBaseException(Exception):

+    """base exception class for all parsing runtime exceptions"""

+    # Performance tuning: we construct a *lot* of these, so keep this

+    # constructor as small and fast as possible

+    def __init__( self, pstr, loc=0, msg=None, elem=None ):

+        self.loc = loc

+        if msg is None:

+            self.msg = pstr

+            self.pstr = ""

+        else:

+            self.msg = msg

+            self.pstr = pstr

+        self.parserElement = elem

+        self.args = (pstr, loc, msg)

+

+    @classmethod

+    def _from_exception(cls, pe):

+        """

+        internal factory method to simplify creating one type of ParseException 

+        from another - avoids having __init__ signature conflicts among subclasses

+        """

+        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)

+

+    def __getattr__( self, aname ):

+        """supported attributes by name are:

+            - lineno - returns the line number of the exception text

+            - col - returns the column number of the exception text

+            - line - returns the line containing the exception text

+        """

+        if( aname == "lineno" ):

+            return lineno( self.loc, self.pstr )

+        elif( aname in ("col", "column") ):

+            return col( self.loc, self.pstr )

+        elif( aname == "line" ):

+            return line( self.loc, self.pstr )

+        else:

+            raise AttributeError(aname)

+

+    def __str__( self ):

+        return "%s (at char %d), (line:%d, col:%d)" % \

+                ( self.msg, self.loc, self.lineno, self.column )

+    def __repr__( self ):

+        return _ustr(self)

+    def markInputline( self, markerString = ">!<" ):

+        """Extracts the exception line from the input string, and marks

+           the location of the exception with a special symbol.

+        """

+        line_str = self.line

+        line_column = self.column - 1

+        if markerString:

+            line_str = "".join((line_str[:line_column],

+                                markerString, line_str[line_column:]))

+        return line_str.strip()

+    def __dir__(self):

+        return "lineno col line".split() + dir(type(self))

+

+class ParseException(ParseBaseException):

+    """

+    Exception thrown when parse expressions don't match class;

+    supported attributes by name are:

+     - lineno - returns the line number of the exception text

+     - col - returns the column number of the exception text

+     - line - returns the line containing the exception text

+        

+    Example::

+        try:

+            Word(nums).setName("integer").parseString("ABC")

+        except ParseException as pe:

+            print(pe)

+            print("column: {}".format(pe.col))

+            

+    prints::

+       Expected integer (at char 0), (line:1, col:1)

+        column: 1

+    """

+    pass

+

+class ParseFatalException(ParseBaseException):

+    """user-throwable exception thrown when inconsistent parse content

+       is found; stops all parsing immediately"""

+    pass

+

+class ParseSyntaxException(ParseFatalException):

+    """just like L{ParseFatalException}, but thrown internally when an

+       L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop 

+       immediately because an unbacktrackable syntax error has been found"""

+    pass

+

+#~ class ReparseException(ParseBaseException):

+    #~ """Experimental class - parse actions can raise this exception to cause

+       #~ pyparsing to reparse the input string:

+        #~ - with a modified input string, and/or

+        #~ - with a modified start location

+       #~ Set the values of the ReparseException in the constructor, and raise the

+       #~ exception in a parse action to cause pyparsing to use the new string/location.

+       #~ Setting the values as None causes no change to be made.

+       #~ """

+    #~ def __init_( self, newstring, restartLoc ):

+        #~ self.newParseText = newstring

+        #~ self.reparseLoc = restartLoc

+

+class RecursiveGrammarException(Exception):

+    """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""

+    def __init__( self, parseElementList ):

+        self.parseElementTrace = parseElementList

+

+    def __str__( self ):

+        return "RecursiveGrammarException: %s" % self.parseElementTrace

+

+class _ParseResultsWithOffset(object):

+    def __init__(self,p1,p2):

+        self.tup = (p1,p2)

+    def __getitem__(self,i):

+        return self.tup[i]

+    def __repr__(self):

+        return repr(self.tup[0])

+    def setOffset(self,i):

+        self.tup = (self.tup[0],i)

+

+class ParseResults(object):

+    """

+    Structured parse results, to provide multiple means of access to the parsed data:

+       - as a list (C{len(results)})

+       - by list index (C{results[0], results[1]}, etc.)

+       - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})

+

+    Example::

+        integer = Word(nums)

+        date_str = (integer.setResultsName("year") + '/' 

+                        + integer.setResultsName("month") + '/' 

+                        + integer.setResultsName("day"))

+        # equivalent form:

+        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

+

+        # parseString returns a ParseResults object

+        result = date_str.parseString("1999/12/31")

+

+        def test(s, fn=repr):

+            print("%s -> %s" % (s, fn(eval(s))))

+        test("list(result)")

+        test("result[0]")

+        test("result['month']")

+        test("result.day")

+        test("'month' in result")

+        test("'minutes' in result")

+        test("result.dump()", str)

+    prints::

+        list(result) -> ['1999', '/', '12', '/', '31']

+        result[0] -> '1999'

+        result['month'] -> '12'

+        result.day -> '31'

+        'month' in result -> True

+        'minutes' in result -> False

+        result.dump() -> ['1999', '/', '12', '/', '31']

+        - day: 31

+        - month: 12

+        - year: 1999

+    """

    def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
        # If handed an existing ParseResults, return it unchanged; the
        # __doinit flag below is what tells __init__ to skip re-initializing
        # such a reused instance.
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        # mark this as a fresh instance so __init__ performs full setup
        retobj.__doinit = True
        return retobj

+

    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
        # 'isinstance' is bound as a default argument so lookups inside this
        # hot constructor are local (LOAD_FAST) instead of global.
        if self.__doinit:
            # fresh instance (flag set by __new__); initialize all state
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            self.__asList = asList
            self.__modal = modal
            if toklist is None:
                toklist = []
            if isinstance(toklist, list):
                # copy the list so later mutations don't alias the caller's
                self.__toklist = toklist[:]
            elif isinstance(toklist, _generatorType):
                self.__toklist = list(toklist)
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()

        # attach a results name if one was given (runs even for a reused
        # instance returned unchanged by __new__)
        if name is not None and name:
            if not modal:
                # cumulative name: record it so lookups return all matches
                self.__accumNames[name] = 0
            if isinstance(name,int):
                name = _ustr(name) # will always return a str, but use _ustr for consistency
            self.__name = name
            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
                if isinstance(toklist,basestring):
                    toklist = [ toklist ]
                if asList:
                    if isinstance(toklist,ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError,TypeError,IndexError):
                        self[name] = toklist

+

+    def __getitem__( self, i ):

+        if isinstance( i, (int,slice) ):

+            return self.__toklist[i]

+        else:

+            if i not in self.__accumNames:

+                return self.__tokdict[i][-1][0]

+            else:

+                return ParseResults([ v[0] for v in self.__tokdict[i] ])

+

    def __setitem__( self, k, v, isinstance=isinstance ):
        # Three cases: a pre-offset named value, a positional assignment,
        # or a plain named assignment (recorded at offset 0).
        if isinstance(v,_ParseResultsWithOffset):
            # named value with explicit offset - append to this name's history
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
            sub = v[0]
        elif isinstance(k,(int,slice)):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
            sub = v
        if isinstance(sub,ParseResults):
            # weak backref lets nested results discover their container
            # (used by getName) without creating a reference cycle
            sub.__parent = wkref(self)

+

    def __delitem__( self, i ):
        if isinstance(i,(int,slice)):
            mylen = len( self.__toklist )
            del self.__toklist[i]

            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i+1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary: each recorded offset past a
            # removed position shifts left by one
            for name,occurrences in self.__tokdict.items():
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            # string key: drop the results name entirely
            del self.__tokdict[i]

+

+    def __contains__( self, k ):

+        return k in self.__tokdict

+

    def __len__( self ): return len( self.__toklist )
    def __bool__(self): return ( not not self.__toklist )
    __nonzero__ = __bool__  # Python 2 spelling of the truth protocol
    def __iter__( self ): return iter( self.__toklist )
    def __reversed__( self ): return iter( self.__toklist[::-1] )
    def _iterkeys( self ):
        # dict.iterkeys only exists on Python 2; fall back to plain iteration
        if hasattr(self.__tokdict, "iterkeys"):
            return self.__tokdict.iterkeys()
        else:
            return iter(self.__tokdict)

    def _itervalues( self ):
        # lazily resolve each name through __getitem__ (honors modal names)
        return (self[k] for k in self._iterkeys())

    def _iteritems( self ):
        return ((k, self[k]) for k in self._iterkeys())

+

    # choose dict-flavored iteration API names appropriate to the running
    # Python major version (evaluated once, at class-creation time)
    if PY_3:
        keys = _iterkeys
        """Returns an iterator of all named result keys (Python 3.x only)."""

        values = _itervalues
        """Returns an iterator of all named result values (Python 3.x only)."""

        items = _iteritems
        """Returns an iterator of all named result key-value tuples (Python 3.x only)."""

    else:
        iterkeys = _iterkeys
        """Returns an iterator of all named result keys (Python 2.x only)."""

        itervalues = _itervalues
        """Returns an iterator of all named result values (Python 2.x only)."""

        iteritems = _iteritems
        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""

        def keys( self ):
            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
            return list(self.iterkeys())

        def values( self ):
            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
            return list(self.itervalues())

        def items( self ):
            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
            return list(self.iteritems())

+

+    def haskeys( self ):

+        """Since keys() returns an iterator, this method is helpful in bypassing

+           code that looks for the existence of any defined results names."""

+        return bool(self.__tokdict)

+        

    def pop( self, *args, **kwargs):
        """
        Removes and returns item at specified index (default=C{last}).
        Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
        argument or an integer argument, it will use C{list} semantics
        and pop tokens from the list of parsed tokens. If passed a
        non-integer argument (most likely a string), it will use C{dict}
        semantics and pop the corresponding value from any defined
        results names. A second default return value argument is
        supported, just as in C{dict.pop()}.

        Example::
            def remove_first(tokens):
                tokens.pop(0)
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']

            label = Word(alphas)
            patt = label("LABEL") + OneOrMore(Word(nums))
            print(patt.parseString("AAB 123 321").dump())

            # Use pop() in a parse action to remove named result (note that corresponding value is not
            # removed from list form of results)
            def remove_LABEL(tokens):
                tokens.pop("LABEL")
                return tokens
            patt.addParseAction(remove_LABEL)
            print(patt.parseString("AAB 123 321").dump())
        prints::
            ['AAB', '123', '321']
            - LABEL: AAB

            ['AAB', '123', '321']
        """
        if not args:
            # no index given -> list semantics, pop the last token
            args = [-1]
        for k,v in kwargs.items():
            if k == 'default':
                # dict.pop-style default: (key, default)
                args = (args[0], v)
            else:
                raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
        # int index, bare single argument, or a defined name -> real removal
        if (isinstance(args[0], int) or
                        len(args) == 1 or
                        args[0] in self):
            index = args[0]
            ret = self[index]
            del self[index]
            return ret
        else:
            # unknown name with a default supplied -> return the default
            defaultvalue = args[1]
            return defaultvalue

+

+    def get(self, key, defaultValue=None):

+        """

+        Returns named result matching the given key, or if there is no

+        such name, then returns the given C{defaultValue} or C{None} if no

+        C{defaultValue} is specified.

+

+        Similar to C{dict.get()}.

+        

+        Example::

+            integer = Word(nums)

+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

+

+            result = date_str.parseString("1999/12/31")

+            print(result.get("year")) # -> '1999'

+            print(result.get("hour", "not specified")) # -> 'not specified'

+            print(result.get("hour")) # -> None

+        """

+        if key in self:

+            return self[key]

+        else:

+            return defaultValue

+

    def insert( self, index, insStr ):
        """
        Inserts new element at location index in the list of parsed tokens.

        Similar to C{list.insert()}.

        Example::
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']

            # use a parse action to insert the parse location in the front of the parsed results
            def insert_locn(locn, tokens):
                tokens.insert(0, locn)
            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
        """
        self.__toklist.insert(index, insStr)
        # fixup indices in token dictionary: names recorded at or after the
        # insertion point shift right by one
        for name,occurrences in self.__tokdict.items():
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))

+

+    def append( self, item ):

+        """

+        Add single element to end of ParseResults list of elements.

+

+        Example::

+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']

+            

+            # use a parse action to compute the sum of the parsed integers, and add it to the end

+            def append_sum(tokens):

+                tokens.append(sum(map(int, tokens)))

+            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]

+        """

+        self.__toklist.append(item)

+

+    def extend( self, itemseq ):

+        """

+        Add sequence of elements to end of ParseResults list of elements.

+

+        Example::

+            patt = OneOrMore(Word(alphas))

+            

+            # use a parse action to append the reverse of the matched strings, to make a palindrome

+            def make_palindrome(tokens):

+                tokens.extend(reversed([t[::-1] for t in tokens]))

+                return ''.join(tokens)

+            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'

+        """

+        if isinstance(itemseq, ParseResults):

+            self += itemseq

+        else:

+            self.__toklist.extend(itemseq)

+

+    def clear( self ):

+        """

+        Clear all elements and results names.

+        """

+        del self.__toklist[:]

+        self.__tokdict.clear()

+

+    def __getattr__( self, name ):

+        try:

+            return self[name]

+        except KeyError:

+            return ""

+            

+        if name in self.__tokdict:

+            if name not in self.__accumNames:

+                return self.__tokdict[name][-1][0]

+            else:

+                return ParseResults([ v[0] for v in self.__tokdict[name] ])

+        else:

+            return ""

+

+    def __add__( self, other ):

+        ret = self.copy()

+        ret += other

+        return ret

+

    def __iadd__( self, other ):
        if other.__tokdict:
            # shift other's recorded name offsets past our current length
            # before merging (negative offsets pin to the join point)
            offset = len(self.__toklist)
            addoffset = lambda a: offset if a<0 else a+offset
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
                                for (k,vlist) in otheritems for v in vlist]
            for k,v in otherdictitems:
                self[k] = v
                if isinstance(v[0],ParseResults):
                    # re-point nested results' parent backrefs at the merged container
                    v[0].__parent = wkref(self)

        self.__toklist += other.__toklist
        self.__accumNames.update( other.__accumNames )
        return self

+

+    def __radd__(self, other):

+        if isinstance(other,int) and other == 0:

+            # useful for merging many ParseResults using sum() builtin

+            return self.copy()

+        else:

+            # this may raise a TypeError - so be it

+            return other + self

+        

+    def __repr__( self ):

+        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )

+

+    def __str__( self ):

+        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'

+

+    def _asStringList( self, sep='' ):

+        out = []

+        for item in self.__toklist:

+            if out and sep:

+                out.append(sep)

+            if isinstance( item, ParseResults ):

+                out += item._asStringList()

+            else:

+                out.append( _ustr(item) )

+        return out

+

+    def asList( self ):

+        """

+        Returns the parse results as a nested list of matching tokens, all converted to strings.

+

+        Example::

+            patt = OneOrMore(Word(alphas))

+            result = patt.parseString("sldkj lsdkj sldkj")

+            # even though the result prints in string-like form, it is actually a pyparsing ParseResults

+            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']

+            

+            # Use asList() to create an actual list

+            result_list = result.asList()

+            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']

+        """

+        return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]

+

    def asDict( self ):
        """
        Returns the named parse results as a nested dictionary.

        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

            result = date_str.parseString('12/31/1999')
            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})

            result_dict = result.asDict()
            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}

            # even though a ParseResults supports dict-like access, sometime you just need to have a dict
            import json
            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
        """
        # pick the lazy item iterator for the running Python version
        if PY_3:
            item_fn = self.items
        else:
            item_fn = self.iteritems

        def toItem(obj):
            # nested results with names become dicts; unnamed ones, lists
            if isinstance(obj, ParseResults):
                if obj.haskeys():
                    return obj.asDict()
                else:
                    return [toItem(v) for v in obj]
            else:
                return obj

        return dict((k,toItem(v)) for k,v in item_fn())

+

    def copy( self ):
        """
        Returns a new copy of a C{ParseResults} object.
        """
        # shallow copy: the token list is rebuilt, but the tokdict copy still
        # shares the per-name occurrence lists with the original
        ret = ParseResults( self.__toklist )
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update( self.__accumNames )
        ret.__name = self.__name
        return ret

+

    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
        """
        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
        """
        nl = "\n"
        out = []
        # map token-list offsets to their results names, for per-token tagging
        namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
                                                            for v in vlist)
        nextLevelIndent = indent + "  "

        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""

        # choose this node's tag: explicit doctag > own results name > "ITEM"
        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name

        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"

        out += [ nl, indent, "<", selfTag, ">" ]

        for i,res in enumerate(self.__toklist):
            if isinstance(res,ParseResults):
                # nested results recurse, passing their name when one exists
                if i in namedItems:
                    out += [ res.asXML(namedItems[i],
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
                else:
                    out += [ res.asXML(None,
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = _xml_escape(_ustr(res))
                out += [ nl, nextLevelIndent, "<", resTag, ">",
                                                xmlBodyText,
                                                "</", resTag, ">" ]

        out += [ nl, indent, "</", selfTag, ">" ]
        return "".join(out)

+

+    def __lookup(self,sub):

+        for k,vlist in self.__tokdict.items():

+            for v,loc in vlist:

+                if sub is v:

+                    return k

+        return None

+

    def getName(self):
        """
        Returns the results name for this token expression. Useful when several 
        different expressions might match at a particular location.

        Example::
            integer = Word(nums)
            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
            house_number_expr = Suppress('#') + Word(nums, alphanums)
            user_data = (Group(house_number_expr)("house_number") 
                        | Group(ssn_expr)("ssn")
                        | Group(integer)("age"))
            user_info = OneOrMore(user_data)
            
            result = user_info.parseString("22 111-22-3333 #221B")
            for item in result:
                print(item.getName(), ':', item[0])
        prints::
            age : 22
            ssn : 111-22-3333
            house_number : 221B
        """
        if self.__name:
            return self.__name
        elif self.__parent:
            # ask the containing results (via weakref) what name we are
            # stored under; the ref may have died, hence the None check
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        elif (len(self) == 1 and
               len(self.__tokdict) == 1 and
               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
            # single token carrying a single name at offset 0/-1 -> that name
            return next(iter(self.__tokdict.keys()))
        else:
            return None

+

    def dump(self, indent='', depth=0, full=True):
        """
        Diagnostic method for listing out the contents of a C{ParseResults}.
        Accepts an optional C{indent} argument so that this string can be embedded
        in a nested display of other data.

        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            
            result = date_str.parseString('12/31/1999')
            print(result.dump())
        prints::
            ['12', '/', '31', '/', '1999']
            - day: 1999
            - month: 31
            - year: 12
        """
        out = []
        NL = '\n'
        out.append( indent+_ustr(self.asList()) )
        if full:
            if self.haskeys():
                # named results: sort by name for stable, readable output
                items = sorted((str(k), v) for k,v in self.items())
                for k,v in items:
                    if out:
                        out.append(NL)
                    out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
                    if isinstance(v,ParseResults):
                        if v:
                            out.append( v.dump(indent,depth+1) )
                        else:
                            out.append(_ustr(v))
                    else:
                        out.append(repr(v))
            elif any(isinstance(vv,ParseResults) for vv in self):
                # no names at this level but nested results: list by index
                v = self
                for i,vv in enumerate(v):
                    if isinstance(vv,ParseResults):
                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),vv.dump(indent,depth+1) ))
                    else:
                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),_ustr(vv)))

        return "".join(out)

+

    def pprint(self, *args, **kwargs):
        """
        Pretty-printer for parsed results as a list, using the C{pprint} module.
        Accepts additional positional or keyword args as defined for the 
        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})

        Example::
            ident = Word(alphas, alphanums)
            num = Word(nums)
            func = Forward()
            term = ident | num | Group('(' + func + ')')
            func <<= ident + Group(Optional(delimitedList(term)))
            result = func.parseString("fna a,b,(fnb c,d,200),100")
            result.pprint(width=40)
        prints::
            ['fna',
             ['a',
              'b',
              ['(', 'fnb', ['c', 'd', '200'], ')'],
              '100']]
        """
        # delegate to stdlib pprint on the plain-list form of the results
        pprint.pprint(self.asList(), *args, **kwargs)

+

    # add support for pickle protocol
    def __getstate__(self):
        # dereference the parent weakref for pickling (weakrefs themselves
        # cannot be pickled); __setstate__ re-wraps it
        return ( self.__toklist,
                 ( self.__tokdict.copy(),
                   self.__parent is not None and self.__parent() or None,
                   self.__accumNames,
                   self.__name ) )

+

    def __setstate__(self,state):
        # inverse of __getstate__: state = (toklist, (tokdict, parent, accumNames, name))
        self.__toklist = state[0]
        (self.__tokdict,
         par,
         inAccumNames,
         self.__name) = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if par is not None:
            # restore the weak parent reference dropped during pickling
            self.__parent = wkref(par)
        else:
            self.__parent = None

+

+    def __getnewargs__(self):

+        return self.__toklist, self.__name, self.__asList, self.__modal

+

+    def __dir__(self):

+        return (dir(type(self)) + list(self.keys()))

+

+collections.MutableMapping.register(ParseResults)

+

def col (loc,strg):
    """Returns current column within a string, counting newlines as line separators.
   The first column is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    # a location just past a newline is column 1 of the next line
    if 0 < loc < len(strg) and strg[loc - 1] == '\n':
        return 1
    # otherwise count from the previous newline (rfind returns -1 at line 1,
    # which makes the arithmetic 1-based automatically)
    return loc - strg.rfind("\n", 0, loc)

+

def lineno(loc,strg):
    """Returns current line number within a string, counting newlines as line separators.
   The first line is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    # 1-based: line number == newlines seen before loc, plus one
    return 1 + strg.count("\n", 0, loc)

+

def line( loc, strg ):
    """Returns the line of text containing loc within a string, counting newlines as line separators.
       """
    prev_nl = strg.rfind("\n", 0, loc)
    next_nl = strg.find("\n", loc)
    # no trailing newline -> take through end of string
    return strg[prev_nl + 1:next_nl] if next_nl >= 0 else strg[prev_nl + 1:]

+

def _defaultStartDebugAction( instring, loc, expr ):
    # default debug hook: fired when an expression begins a match attempt
    print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))

+

def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
    # default debug hook: fired when an expression matches successfully
    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))

+

def _defaultExceptionDebugAction( instring, loc, expr, exc ):
    # default debug hook: fired when an expression raises a parse exception
    print ("Exception raised:" + _ustr(exc))

+

def nullDebugAction(*args):
    """'Do-nothing' debug action, to suppress debugging output during parsing."""
    # intentionally ignores all arguments and returns None
    return None

+

+# Only works on Python 3.x - nonlocal is toxic to Python 2 installs

+#~ 'decorator to trim function calls to match the arity of the target'

+#~ def _trim_arity(func, maxargs=3):

+    #~ if func in singleArgBuiltins:

+        #~ return lambda s,l,t: func(t)

+    #~ limit = 0

+    #~ foundArity = False

+    #~ def wrapper(*args):

+        #~ nonlocal limit,foundArity

+        #~ while 1:

+            #~ try:

+                #~ ret = func(*args[limit:])

+                #~ foundArity = True

+                #~ return ret

+            #~ except TypeError:

+                #~ if limit == maxargs or foundArity:

+                    #~ raise

+                #~ limit += 1

+                #~ continue

+    #~ return wrapper

+

# this version is Python 2.x-3.x cross-compatible
'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
    """Wrap *func* so it can be registered as a parse action of any supported
    arity: leading arguments are trimmed until a call succeeds.  TypeErrors
    raised by the arity probe are told apart from TypeErrors raised inside
    *func* by comparing traceback line positions (see LINE_DIFF below)."""
    if func in singleArgBuiltins:
        # builtins can't be introspected; they always take just the tokens
        return lambda s,l,t: func(t)
    # one-element lists act as mutable closure cells (py2 has no 'nonlocal')
    limit = [0]
    foundArity = [False]
    
    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
    if system_version[:2] >= (3,5):
        def extract_stack(limit=0):
            # special handling for Python 3.5.0 - extra deep call stack by 1
            offset = -3 if system_version == (3,5,0) else -2
            frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
            return [(frame_summary.filename, frame_summary.lineno)]
        def extract_tb(tb, limit=0):
            frames = traceback.extract_tb(tb, limit=limit)
            frame_summary = frames[-1]
            return [(frame_summary.filename, frame_summary.lineno)]
    else:
        extract_stack = traceback.extract_stack
        extract_tb = traceback.extract_tb
    
    # synthesize what would be returned by traceback.extract_stack at the call to 
    # user's parse action 'func', so that we don't incur call penalty at parse time
    
    LINE_DIFF = 6
    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND 
    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
    this_line = extract_stack(limit=2)[-1]
    pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)

    def wrapper(*args):
        while 1:
            try:
                ret = func(*args[limit[0]:])
                foundArity[0] = True
                return ret
            except TypeError:
                # re-raise TypeErrors if they did not come from our arity testing
                if foundArity[0]:
                    raise
                else:
                    try:
                        tb = sys.exc_info()[-1]
                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
                            raise
                    finally:
                        del tb  # break traceback reference cycle

                if limit[0] <= maxargs:
                    limit[0] += 1
                    continue
                raise

    # copy func name to wrapper for sensible debug output
    func_name = "<parse action>"
    try:
        func_name = getattr(func, '__name__', 
                            getattr(func, '__class__').__name__)
    except Exception:
        func_name = str(func)
    wrapper.__name__ = func_name

    return wrapper

+

+class ParserElement(object):

+    """Abstract base level parser element class."""

+    DEFAULT_WHITE_CHARS = " \n\t\r"

+    verbose_stacktrace = False

+

    @staticmethod
    def setDefaultWhitespaceChars( chars ):
        r"""
        Overrides the default whitespace chars

        Example::
            # default whitespace chars are space, <TAB> and newline
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
            
            # change to just treat newline as significant
            ParserElement.setDefaultWhitespaceChars(" \t")
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
        """
        # class-wide setting; only affects ParserElements constructed afterwards
        ParserElement.DEFAULT_WHITE_CHARS = chars

+

    @staticmethod
    def inlineLiteralsUsing(cls):
        """
        Set class to be used for inclusion of string literals into a parser.
        
        Example::
            # default literal class used is Literal
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']


            # change to Suppress
            ParserElement.inlineLiteralsUsing(Suppress)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
        """
        # class-wide hook consulted when bare strings appear in expressions
        ParserElement._literalStringClass = cls

+

    def __init__( self, savelist=False ):
        """Initialize common state for all parser elements.

        savelist - when True, matched tokens are kept as a sub-list in the
        results (used by group-like subclasses).
        """
        self.parseAction = list()          # callables run on a successful match
        self.failAction = None             # callable run when this element fails to match
        #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
        self.strRepr = None                # cached __str__ representation
        self.resultsName = None            # name under which tokens are stored in ParseResults
        self.saveAsList = savelist
        self.skipWhitespace = True         # skip self.whiteChars before attempting a match
        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        self.copyDefaultWhiteChars = True  # copy() re-reads the class default whiteChars
        self.mayReturnEmpty = False # used when checking for left-recursion
        self.keepTabs = False              # when False, parseString expands tabs first
        self.ignoreExprs = list()          # expressions (e.g. comments) skipped during preParse
        self.debug = False
        self.streamlined = False           # set once streamline() has run
        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
        self.errmsg = ""
        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
        self.debugActions = ( None, None, None ) #custom debug actions
        self.re = None                     # compiled regex, used by regex-backed subclasses
        self.callPreparse = True # used to avoid redundant calls to preParse
        self.callDuringTry = False         # run parse actions even during lookahead/tryParse

+

+    def copy( self ):

+        """

+        Make a copy of this C{ParserElement}.  Useful for defining different parse actions

+        for the same parsing pattern, using copies of the original parse element.

+        

+        Example::

+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))

+            integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")

+            integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")

+            

+            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))

+        prints::

+            [5120, 100, 655360, 268435456]

+        Equivalent form of C{expr.copy()} is just C{expr()}::

+            integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")

+        """

+        cpy = copy.copy( self )

+        cpy.parseAction = self.parseAction[:]

+        cpy.ignoreExprs = self.ignoreExprs[:]

+        if self.copyDefaultWhiteChars:

+            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS

+        return cpy

+

+    def setName( self, name ):

+        """

+        Define name for this expression, makes debugging and exception messages clearer.

+        

+        Example::

+            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)

+            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)

+        """

+        self.name = name

+        self.errmsg = "Expected " + self.name

+        if hasattr(self,"exception"):

+            self.exception.msg = self.errmsg

+        return self

+

+    def setResultsName( self, name, listAllMatches=False ):

+        """

+        Define name for referencing matching tokens as a nested attribute

+        of the returned parse results.

+        NOTE: this returns a *copy* of the original C{ParserElement} object;

+        this is so that the client can define a basic element, such as an

+        integer, and reference it in multiple places with different names.

+

+        You can also set results names using the abbreviated syntax,

+        C{expr("name")} in place of C{expr.setResultsName("name")} - 

+        see L{I{__call__}<__call__>}.

+

+        Example::

+            date_str = (integer.setResultsName("year") + '/' 

+                        + integer.setResultsName("month") + '/' 

+                        + integer.setResultsName("day"))

+

+            # equivalent form:

+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

+        """

+        newself = self.copy()

+        if name.endswith("*"):

+            name = name[:-1]

+            listAllMatches=True

+        newself.resultsName = name

+        newself.modalResults = not listAllMatches

+        return newself

+

+    def setBreak(self,breakFlag = True):

+        """Method to invoke the Python pdb debugger when this element is

+           about to be parsed. Set C{breakFlag} to True to enable, False to

+           disable.

+        """

+        if breakFlag:

+            _parseMethod = self._parse

+            def breaker(instring, loc, doActions=True, callPreParse=True):

+                import pdb

+                pdb.set_trace()

+                return _parseMethod( instring, loc, doActions, callPreParse )

+            breaker._originalParseMethod = _parseMethod

+            self._parse = breaker

+        else:

+            if hasattr(self._parse,"_originalParseMethod"):

+                self._parse = self._parse._originalParseMethod

+        return self

+

+    def setParseAction( self, *fns, **kwargs ):

+        """

+        Define action to perform when successfully matching parse element definition.

+        Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},

+        C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:

+         - s   = the original string being parsed (see note below)

+         - loc = the location of the matching substring

+         - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object

+        If the functions in fns modify the tokens, they can return them as the return

+        value from fn, and the modified list of tokens will replace the original.

+        Otherwise, fn does not need to return any value.

+

+        Optional keyword arguments:

+         - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing

+

+        Note: the default parsing behavior is to expand tabs in the input string

+        before starting the parsing process.  See L{I{parseString}<parseString>} for more information

+        on parsing strings containing C{<TAB>}s, and suggested methods to maintain a

+        consistent view of the parsed string, the parse location, and line and column

+        positions within the parsed string.

+        

+        Example::

+            integer = Word(nums)

+            date_str = integer + '/' + integer + '/' + integer

+

+            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']

+

+            # use parse action to convert to ints at parse time

+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))

+            date_str = integer + '/' + integer + '/' + integer

+

+            # note that integer fields are now ints, not strings

+            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]

+        """

+        self.parseAction = list(map(_trim_arity, list(fns)))

+        self.callDuringTry = kwargs.get("callDuringTry", False)

+        return self

+

+    def addParseAction( self, *fns, **kwargs ):

+        """

+        Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.

+        

+        See examples in L{I{copy}<copy>}.

+        """

+        self.parseAction += list(map(_trim_arity, list(fns)))

+        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)

+        return self

+

+    def addCondition(self, *fns, **kwargs):

+        """Add a boolean predicate function to expression's list of parse actions. See 

+        L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, 

+        functions passed to C{addCondition} need to return boolean success/fail of the condition.

+

+        Optional keyword arguments:

+         - message = define a custom message to be used in the raised exception

+         - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException

+         

+        Example::

+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))

+            year_int = integer.copy()

+            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")

+            date_str = year_int + '/' + integer + '/' + integer

+

+            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)

+        """

+        msg = kwargs.get("message", "failed user-defined condition")

+        exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException

+        for fn in fns:

+            def pa(s,l,t):

+                if not bool(_trim_arity(fn)(s,l,t)):

+                    raise exc_type(s,l,msg)

+            self.parseAction.append(pa)

+        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)

+        return self

+

+    def setFailAction( self, fn ):

+        """Define action to perform if parsing fails at this expression.

+           Fail acton fn is a callable function that takes the arguments

+           C{fn(s,loc,expr,err)} where:

+            - s = string being parsed

+            - loc = location where expression match was attempted and failed

+            - expr = the parse expression that failed

+            - err = the exception thrown

+           The function returns no value.  It may throw C{L{ParseFatalException}}

+           if it is desired to stop parsing immediately."""

+        self.failAction = fn

+        return self

+

+    def _skipIgnorables( self, instring, loc ):

+        exprsFound = True

+        while exprsFound:

+            exprsFound = False

+            for e in self.ignoreExprs:

+                try:

+                    while 1:

+                        loc,dummy = e._parse( instring, loc )

+                        exprsFound = True

+                except ParseException:

+                    pass

+        return loc

+

+    def preParse( self, instring, loc ):

+        if self.ignoreExprs:

+            loc = self._skipIgnorables( instring, loc )

+

+        if self.skipWhitespace:

+            wt = self.whiteChars

+            instrlen = len(instring)

+            while loc < instrlen and instring[loc] in wt:

+                loc += 1

+

+        return loc

+

+    def parseImpl( self, instring, loc, doActions=True ):

+        return loc, []

+

+    def postParse( self, instring, loc, tokenlist ):

+        return tokenlist

+

    #~ @profile
    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
        """Core match driver: preParse, parseImpl, postParse, then run parse
        actions, with debug/fail hooks interleaved.  Returns (newloc, tokens).

        The two branches below are the same pipeline; the debug/failAction
        branch adds hook calls and exception interception, the other is the
        streamlined fast path.
        """
        debugging = ( self.debug ) #and doActions )

        if debugging or self.failAction:
            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
            # debugActions[0] = "try" hook, fired before the match attempt
            if (self.debugActions[0] ):
                self.debugActions[0]( instring, loc, self )
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            try:
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    # ran off the end of instring - report as a normal parse failure
                    raise ParseException( instring, len(instring), self.errmsg, self )
            except ParseBaseException as err:
                #~ print ("Exception raised:", err)
                # debugActions[2] = "fail" hook
                if self.debugActions[2]:
                    self.debugActions[2]( instring, tokensStart, self, err )
                if self.failAction:
                    self.failAction( instring, tokensStart, self, err )
                raise
        else:
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            # only pay for the IndexError guard when the subclass may index
            # past the end of the string
            if self.mayIndexError or loc >= len(instring):
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    raise ParseException( instring, len(instring), self.errmsg, self )
            else:
                loc,tokens = self.parseImpl( instring, preloc, doActions )

        tokens = self.postParse( instring, loc, tokens )

        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
        if self.parseAction and (doActions or self.callDuringTry):
            if debugging:
                try:
                    # a parse action returning non-None replaces the tokens
                    for fn in self.parseAction:
                        tokens = fn( instring, tokensStart, retTokens )
                        if tokens is not None:
                            retTokens = ParseResults( tokens,
                                                      self.resultsName,
                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                      modal=self.modalResults )
                except ParseBaseException as err:
                    #~ print "Exception raised in user parse action:", err
                    if (self.debugActions[2] ):
                        self.debugActions[2]( instring, tokensStart, self, err )
                    raise
            else:
                for fn in self.parseAction:
                    tokens = fn( instring, tokensStart, retTokens )
                    if tokens is not None:
                        retTokens = ParseResults( tokens,
                                                  self.resultsName,
                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                  modal=self.modalResults )

        if debugging:
            #~ print ("Matched",self,"->",retTokens.asList())
            # debugActions[1] = "match" hook, fired after a successful match
            if (self.debugActions[1] ):
                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )

        return loc, retTokens

+

+    def tryParse( self, instring, loc ):

+        try:

+            return self._parse( instring, loc, doActions=False )[0]

+        except ParseFatalException:

+            raise ParseException( instring, loc, self.errmsg, self)

+    

+    def canParseNext(self, instring, loc):

+        try:

+            self.tryParse(instring, loc)

+        except (ParseException, IndexError):

+            return False

+        else:

+            return True

+

    class _UnboundedCache(object):
        """Packrat cache with no size limit.

        The dict is held in a closure rather than on the instance, and the
        accessors are bound with types.MethodType, keeping the mapping
        itself private.  ``not_in_cache`` is a unique sentinel returned by
        get() for missing keys (None may be a legitimate cached value).
        """
        def __init__(self):
            cache = {}
            self.not_in_cache = not_in_cache = object()

            def get(self, key):
                # sentinel (not None) signals a miss
                return cache.get(key, not_in_cache)

            def set(self, key, value):
                cache[key] = value

            def clear(self):
                cache.clear()

            self.get = types.MethodType(get, self)
            self.set = types.MethodType(set, self)
            self.clear = types.MethodType(clear, self)

+

    # Bounded FIFO packrat cache.  Two implementations are defined at class
    # creation time: an OrderedDict-based one when OrderedDict is available,
    # and a dict + deque fallback otherwise.
    if _OrderedDict is not None:
        class _FifoCache(object):
            """FIFO-bounded cache backed by an OrderedDict; the oldest entry
            is evicted once more than ``size`` entries are stored."""
            def __init__(self, size):
                self.not_in_cache = not_in_cache = object()

                cache = _OrderedDict()

                def get(self, key):
                    # sentinel (not None) signals a miss
                    return cache.get(key, not_in_cache)

                def set(self, key, value):
                    cache[key] = value
                    if len(cache) > size:
                        # popitem(False) removes the oldest (FIFO) entry
                        cache.popitem(False)

                def clear(self):
                    cache.clear()

                self.get = types.MethodType(get, self)
                self.set = types.MethodType(set, self)
                self.clear = types.MethodType(clear, self)

    else:
        class _FifoCache(object):
            """Fallback FIFO-bounded cache: a plain dict for storage plus a
            bounded deque tracking insertion order for eviction."""
            def __init__(self, size):
                self.not_in_cache = not_in_cache = object()

                cache = {}
                key_fifo = collections.deque([], size)

                def get(self, key):
                    return cache.get(key, not_in_cache)

                def set(self, key, value):
                    cache[key] = value
                    if len(cache) > size:
                        # evict the oldest key recorded in the fifo
                        cache.pop(key_fifo.popleft(), None)
                    key_fifo.append(key)

                def clear(self):
                    cache.clear()
                    key_fifo.clear()

                self.get = types.MethodType(get, self)
                self.set = types.MethodType(set, self)
                self.clear = types.MethodType(clear, self)

+

    # argument cache for optimizing repeated calls when backtracking through recursive expressions
    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
    packrat_cache_lock = RLock()   # guards cache access across threads
    packrat_cache_stats = [0, 0]   # [hits, misses]

+

    # this method gets repeatedly called during backtracking with the same arguments -
    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
        """Packrat-enabled parse driver: memoize _parseNoCache results (both
        successes and parse exceptions) keyed on the full argument tuple.
        Installed as ParserElement._parse by enablePackrat()."""
        HIT, MISS = 0, 1
        lookup = (self, instring, loc, callPreParse, doActions)
        with ParserElement.packrat_cache_lock:
            cache = ParserElement.packrat_cache
            value = cache.get(lookup)
            if value is cache.not_in_cache:
                ParserElement.packrat_cache_stats[MISS] += 1
                try:
                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
                except ParseBaseException as pe:
                    # cache a copy of the exception, without the traceback
                    cache.set(lookup, pe.__class__(*pe.args))
                    raise
                else:
                    # cache a copy of the ParseResults so later mutation by
                    # callers does not corrupt the cached value
                    cache.set(lookup, (value[0], value[1].copy()))
                    return value
            else:
                ParserElement.packrat_cache_stats[HIT] += 1
                # cached exceptions are re-raised; cached successes are
                # returned as a fresh copy for the same reason as above
                if isinstance(value, Exception):
                    raise value
                return (value[0], value[1].copy())

+

+    _parse = _parseNoCache

+

+    @staticmethod

+    def resetCache():

+        ParserElement.packrat_cache.clear()

+        ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)

+

+    _packratEnabled = False

+    @staticmethod

+    def enablePackrat(cache_size_limit=128):

+        """Enables "packrat" parsing, which adds memoizing to the parsing logic.

+           Repeated parse attempts at the same string location (which happens

+           often in many complex grammars) can immediately return a cached value,

+           instead of re-executing parsing/validating code.  Memoizing is done of

+           both valid results and parsing exceptions.

+           

+           Parameters:

+            - cache_size_limit - (default=C{128}) - if an integer value is provided

+              will limit the size of the packrat cache; if None is passed, then

+              the cache size will be unbounded; if 0 is passed, the cache will

+              be effectively disabled.

+            

+           This speedup may break existing programs that use parse actions that

+           have side-effects.  For this reason, packrat parsing is disabled when

+           you first import pyparsing.  To activate the packrat feature, your

+           program must call the class method C{ParserElement.enablePackrat()}.  If

+           your program uses C{psyco} to "compile as you go", you must call

+           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,

+           Python will crash.  For best results, call C{enablePackrat()} immediately

+           after importing pyparsing.

+           

+           Example::

+               import pyparsing

+               pyparsing.ParserElement.enablePackrat()

+        """

+        if not ParserElement._packratEnabled:

+            ParserElement._packratEnabled = True

+            if cache_size_limit is None:

+                ParserElement.packrat_cache = ParserElement._UnboundedCache()

+            else:

+                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)

+            ParserElement._parse = ParserElement._parseCache

+

    def parseString( self, instring, parseAll=False ):
        """
        Execute the parse expression with the given string.
        This is the main interface to the client code, once the complete
        expression has been built.

        If you want the grammar to require that the entire input string be
        successfully parsed, then set C{parseAll} to True (equivalent to ending
        the grammar with C{L{StringEnd()}}).

        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
        in order to report proper column numbers in parse actions.
        If the input string contains tabs and
        the grammar uses parse actions that use the C{loc} argument to index into the
        string being parsed, you can ensure you have a consistent view of the input
        string by:
         - calling C{parseWithTabs} on your grammar before calling C{parseString}
           (see L{I{parseWithTabs}<parseWithTabs>})
         - define your parse action using the full C{(s,loc,toks)} signature, and
           reference the input string using the parse action's C{s} argument
         - explicitly expand the tabs in your input string before calling
           C{parseString}
        
        Example::
            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
        """
        # packrat cache entries from a previous parse are not reusable
        ParserElement.resetCache()
        if not self.streamlined:
            self.streamline()
            #~ self.saveAsList = True
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            instring = instring.expandtabs()
        try:
            loc, tokens = self._parse( instring, 0 )
            if parseAll:
                # require that only whitespace/ignorables remain after the match
                loc = self.preParse( instring, loc )
                se = Empty() + StringEnd()
                se._parse( instring, loc )
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc
        else:
            return tokens

+

    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
        """
        Scan the input string for expression matches.  Each match will return the
        matching tokens, start location, and end location.  May be called with optional
        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
        C{overlap} is specified, then overlapping matches will be reported.

        Note that the start and end locations are reported relative to the string
        being parsed.  See L{I{parseString}<parseString>} for more information on parsing
        strings with embedded tabs.

        Example::
            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
            print(source)
            for tokens,start,end in Word(alphas).scanString(source):
                print(' '*start + '^'*(end-start))
                print(' '*start + tokens[0])
        
        prints::
        
            sldjf123lsdjjkf345sldkjf879lkjsfd987
            ^^^^^
            sldjf
                    ^^^^^^^
                    lsdjjkf
                              ^^^^^^
                              sldkjf
                                       ^^^^^^
                                       lkjsfd
        """
        if not self.streamlined:
            self.streamline()
        for e in self.ignoreExprs:
            e.streamline()

        if not self.keepTabs:
            instring = _ustr(instring).expandtabs()
        instrlen = len(instring)
        loc = 0
        # hoist method lookups out of the scanning loop
        preparseFn = self.preParse
        parseFn = self._parse
        ParserElement.resetCache()
        matches = 0
        try:
            while loc <= instrlen and matches < maxMatches:
                try:
                    preloc = preparseFn( instring, loc )
                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
                except ParseException:
                    # no match here - resume scanning one char past preloc
                    loc = preloc+1
                else:
                    if nextLoc > loc:
                        matches += 1
                        yield tokens, preloc, nextLoc
                        if overlap:
                            # NOTE(review): `nextloc` vs `nextLoc` below looks
                            # inconsistent (lowercase computed, camelCase used);
                            # this matches upstream pyparsing as vendored -
                            # confirm against upstream before changing.
                            nextloc = preparseFn( instring, loc )
                            if nextloc > loc:
                                loc = nextLoc
                            else:
                                loc += 1
                        else:
                            loc = nextLoc
                    else:
                        # zero-width match - step forward to avoid looping forever
                        loc = preloc+1
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc

+

+    def transformString( self, instring ):

+        """

+        Extension to C{L{scanString}}, to modify matching text with modified tokens that may

+        be returned from a parse action.  To use C{transformString}, define a grammar and

+        attach a parse action to it that modifies the returned token list.

+        Invoking C{transformString()} on a target string will then scan for matches,

+        and replace the matched text patterns according to the logic in the parse

+        action.  C{transformString()} returns the resulting transformed string.

+        

+        Example::

+            wd = Word(alphas)

+            wd.setParseAction(lambda toks: toks[0].title())

+            

+            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))

+        Prints::

+            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.

+        """

+        out = []

+        lastE = 0

+        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to

+        # keep string locs straight between transformString and scanString

+        self.keepTabs = True

+        try:

+            for t,s,e in self.scanString( instring ):

+                out.append( instring[lastE:s] )

+                if t:

+                    if isinstance(t,ParseResults):

+                        out += t.asList()

+                    elif isinstance(t,list):

+                        out += t

+                    else:

+                        out.append(t)

+                lastE = e

+            out.append(instring[lastE:])

+            out = [o for o in out if o]

+            return "".join(map(_ustr,_flatten(out)))

+        except ParseBaseException as exc:

+            if ParserElement.verbose_stacktrace:

+                raise

+            else:

+                # catch and re-raise exception from here, clears out pyparsing internal stack trace

+                raise exc

+

+    def searchString( self, instring, maxMatches=_MAX_INT ):

+        """

+        Another extension to C{L{scanString}}, simplifying the access to the tokens found

+        to match the given parse expression.  May be called with optional

+        C{maxMatches} argument, to clip searching after 'n' matches are found.

+        

+        Example::

+            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters

+            cap_word = Word(alphas.upper(), alphas.lower())

+            

+            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))

+        prints::

+            ['More', 'Iron', 'Lead', 'Gold', 'I']

+        """

+        try:

+            return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])

+        except ParseBaseException as exc:

+            if ParserElement.verbose_stacktrace:

+                raise

+            else:

+                # catch and re-raise exception from here, clears out pyparsing internal stack trace

+                raise exc

+

+    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):

+        """

+        Generator method to split a string using the given expression as a separator.

+        May be called with optional C{maxsplit} argument, to limit the number of splits;

+        and the optional C{includeSeparators} argument (default=C{False}), if the separating

+        matching text should be included in the split results.

+        

+        Example::        

+            punc = oneOf(list(".,;:/-!?"))

+            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))

+        prints::

+            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']

+        """

+        splits = 0

+        last = 0

+        for t,s,e in self.scanString(instring, maxMatches=maxsplit):

+            yield instring[last:s]

+            if includeSeparators:

+                yield t[0]

+            last = e

+        yield instring[last:]

+

+    def __add__(self, other ):

+        """

+        Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement

+        converts them to L{Literal}s by default.

+        

+        Example::

+            greet = Word(alphas) + "," + Word(alphas) + "!"

+            hello = "Hello, World!"

+            print (hello, "->", greet.parseString(hello))

+        Prints::

+            Hello, World! -> ['Hello', ',', 'World', '!']

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return And( [ self, other ] )

+

+    def __radd__(self, other ):

+        """

+        Implementation of + operator when left operand is not a C{L{ParserElement}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return other + self

+

+    def __sub__(self, other):

+        """

+        Implementation of - operator, returns C{L{And}} with error stop

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return And( [ self, And._ErrorStop(), other ] )

+

+    def __rsub__(self, other ):

+        """

+        Implementation of - operator when left operand is not a C{L{ParserElement}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return other - self

+

+    def __mul__(self,other):

+        """

+        Implementation of * operator, allows use of C{expr * 3} in place of

+        C{expr + expr + expr}.  Expressions may also me multiplied by a 2-integer

+        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples

+        may also include C{None} as in:

+         - C{expr*(n,None)} or C{expr*(n,)} is equivalent

+              to C{expr*n + L{ZeroOrMore}(expr)}

+              (read as "at least n instances of C{expr}")

+         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}

+              (read as "0 to n instances of C{expr}")

+         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}

+         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}

+

+        Note that C{expr*(None,n)} does not raise an exception if

+        more than n exprs exist in the input stream; that is,

+        C{expr*(None,n)} does not enforce a maximum number of expr

+        occurrences.  If this behavior is desired, then write

+        C{expr*(None,n) + ~expr}

+        """

+        if isinstance(other,int):

+            minElements, optElements = other,0

+        elif isinstance(other,tuple):

+            other = (other + (None, None))[:2]

+            if other[0] is None:

+                other = (0, other[1])

+            if isinstance(other[0],int) and other[1] is None:

+                if other[0] == 0:

+                    return ZeroOrMore(self)

+                if other[0] == 1:

+                    return OneOrMore(self)

+                else:

+                    return self*other[0] + ZeroOrMore(self)

+            elif isinstance(other[0],int) and isinstance(other[1],int):

+                minElements, optElements = other

+                optElements -= minElements

+            else:

+                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))

+        else:

+            raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))

+

+        if minElements < 0:

+            raise ValueError("cannot multiply ParserElement by negative value")

+        if optElements < 0:

+            raise ValueError("second tuple value must be greater or equal to first tuple value")

+        if minElements == optElements == 0:

+            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")

+

+        if (optElements):

+            def makeOptionalList(n):

+                if n>1:

+                    return Optional(self + makeOptionalList(n-1))

+                else:

+                    return Optional(self)

+            if minElements:

+                if minElements == 1:

+                    ret = self + makeOptionalList(optElements)

+                else:

+                    ret = And([self]*minElements) + makeOptionalList(optElements)

+            else:

+                ret = makeOptionalList(optElements)

+        else:

+            if minElements == 1:

+                ret = self

+            else:

+                ret = And([self]*minElements)

+        return ret

+

    def __rmul__(self, other):
        """Implementation of * when the left operand is an int/tuple: n * expr
        is the same as expr * n (multiplication is commutative here)."""
        return self.__mul__(other)

+

+    def __or__(self, other ):

+        """

+        Implementation of | operator - returns C{L{MatchFirst}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return MatchFirst( [ self, other ] )

+

+    def __ror__(self, other ):

+        """

+        Implementation of | operator when left operand is not a C{L{ParserElement}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return other | self

+

+    def __xor__(self, other ):

+        """

+        Implementation of ^ operator - returns C{L{Or}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return Or( [ self, other ] )

+

+    def __rxor__(self, other ):

+        """

+        Implementation of ^ operator when left operand is not a C{L{ParserElement}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return other ^ self

+

+    def __and__(self, other ):

+        """

+        Implementation of & operator - returns C{L{Each}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return Each( [ self, other ] )

+

+    def __rand__(self, other ):

+        """

+        Implementation of & operator when left operand is not a C{L{ParserElement}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return other & self

+

    def __invert__( self ):
        """
        Implementation of ~ operator - returns C{L{NotAny}} (negative
        lookahead: matches only if this expression does NOT match here).
        """
        return NotAny( self )

+

+    def __call__(self, name=None):

+        """

+        Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.

+        

+        If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be

+        passed as C{True}.

+           

+        If C{name} is omitted, same as calling C{L{copy}}.

+

+        Example::

+            # these are equivalent

+            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")

+            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")             

+        """

+        if name is not None:

+            return self.setResultsName(name)

+        else:

+            return self.copy()

+

+    def suppress( self ):

+        """

+        Suppresses the output of this C{ParserElement}; useful to keep punctuation from

+        cluttering up returned output.

+        """

+        return Suppress( self )

+

+    def leaveWhitespace( self ):

+        """

+        Disables the skipping of whitespace before matching the characters in the

+        C{ParserElement}'s defined pattern.  This is normally only used internally by

+        the pyparsing module, but may be needed in some whitespace-sensitive grammars.

+        """

+        self.skipWhitespace = False

+        return self

+

+    def setWhitespaceChars( self, chars ):

+        """

+        Overrides the default whitespace chars

+        """

+        self.skipWhitespace = True

+        self.whiteChars = chars

+        self.copyDefaultWhiteChars = False

+        return self

+

+    def parseWithTabs( self ):

+        """

+        Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.

+        Must be called before C{parseString} when the input grammar contains elements that

+        match C{<TAB>} characters.

+        """

+        self.keepTabs = True

+        return self

+

+    def ignore( self, other ):

+        """

+        Define expression to be ignored (e.g., comments) while doing pattern

+        matching; may be called repeatedly, to define multiple comment or other

+        ignorable patterns.

+        

+        Example::

+            patt = OneOrMore(Word(alphas))

+            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']

+            

+            patt.ignore(cStyleComment)

+            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']

+        """

+        if isinstance(other, basestring):

+            other = Suppress(other)

+

+        if isinstance( other, Suppress ):

+            if other not in self.ignoreExprs:

+                self.ignoreExprs.append(other)

+        else:

+            self.ignoreExprs.append( Suppress( other.copy() ) )

+        return self

+

+    def setDebugActions( self, startAction, successAction, exceptionAction ):

+        """

+        Enable display of debugging messages while doing pattern matching.

+        """

+        self.debugActions = (startAction or _defaultStartDebugAction,

+                             successAction or _defaultSuccessDebugAction,

+                             exceptionAction or _defaultExceptionDebugAction)

+        self.debug = True

+        return self

+

+    def setDebug( self, flag=True ):

+        """

+        Enable display of debugging messages while doing pattern matching.

+        Set C{flag} to True to enable, False to disable.

+

+        Example::

+            wd = Word(alphas).setName("alphaword")

+            integer = Word(nums).setName("numword")

+            term = wd | integer

+            

+            # turn on debugging for wd

+            wd.setDebug()

+

+            OneOrMore(term).parseString("abc 123 xyz 890")

+        

+        prints::

+            Match alphaword at loc 0(1,1)

+            Matched alphaword -> ['abc']

+            Match alphaword at loc 3(1,4)

+            Exception raised:Expected alphaword (at char 4), (line:1, col:5)

+            Match alphaword at loc 7(1,8)

+            Matched alphaword -> ['xyz']

+            Match alphaword at loc 11(1,12)

+            Exception raised:Expected alphaword (at char 12), (line:1, col:13)

+            Match alphaword at loc 15(1,16)

+            Exception raised:Expected alphaword (at char 15), (line:1, col:16)

+

+        The output shown is that produced by the default debug actions - custom debug actions can be

+        specified using L{setDebugActions}. Prior to attempting

+        to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}

+        is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}

+        message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,

+        which makes debugging and exception messages easier to understand - for instance, the default

+        name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.

+        """

+        if flag:

+            self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )

+        else:

+            self.debug = False

+        return self

+

    def __str__( self ):
        # str() of an expression is its (possibly auto-generated) name
        return self.name

+

    def __repr__( self ):
        # delegate to _ustr for Py2/Py3-safe unicode handling
        return _ustr(self)

+

+    def streamline( self ):

+        self.streamlined = True

+        self.strRepr = None

+        return self

+

    def checkRecursion( self, parseElementList ):
        # No-op hook; compound-expression subclasses override this to detect
        # infinitely recursive grammar definitions.
        pass

+

+    def validate( self, validateTrace=[] ):

+        """

+        Check defined expressions for valid structure, check for infinite recursive definitions.

+        """

+        self.checkRecursion( [] )

+

+    def parseFile( self, file_or_filename, parseAll=False ):

+        """

+        Execute the parse expression on the given file or filename.

+        If a filename is specified (instead of a file object),

+        the entire file is opened, read, and closed before parsing.

+        """

+        try:

+            file_contents = file_or_filename.read()

+        except AttributeError:

+            with open(file_or_filename, "r") as f:

+                file_contents = f.read()

+        try:

+            return self.parseString(file_contents, parseAll)

+        except ParseBaseException as exc:

+            if ParserElement.verbose_stacktrace:

+                raise

+            else:

+                # catch and re-raise exception from here, clears out pyparsing internal stack trace

+                raise exc

+

    def __eq__(self,other):
        # Two ParserElements are equal if identical or if all their instance
        # attributes compare equal; a string compares equal if this expression
        # matches it in full.
        if isinstance(other, ParserElement):
            return self is other or vars(self) == vars(other)
        elif isinstance(other, basestring):
            return self.matches(other)
        else:
            # NOTE(review): this compares the super() proxy object itself with
            # `other` (normally False), rather than calling super().__eq__ —
            # looks suspicious; confirm against upstream pyparsing before
            # changing.
            return super(ParserElement,self)==other

+

    def __ne__(self,other):
        # defined explicitly for Py2, where __ne__ is not derived from __eq__
        return not (self == other)

+

    def __hash__(self):
        # hash by identity; note this is intentionally inconsistent with
        # __eq__'s attribute-based equality (pre-existing pyparsing behavior)
        return hash(id(self))

+

    def __req__(self,other):
        # reflected-equality helper; delegates to __eq__
        return self == other

+

    def __rne__(self,other):
        # reflected-inequality helper; delegates to __eq__
        return not (self == other)

+

+    def matches(self, testString, parseAll=True):

+        """

+        Method for quick testing of a parser against a test string. Good for simple 

+        inline microtests of sub expressions while building up larger parser.

+           

+        Parameters:

+         - testString - to test against this expression for a match

+         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests

+            

+        Example::

+            expr = Word(nums)

+            assert expr.matches("100")

+        """

+        try:

+            self.parseString(_ustr(testString), parseAll=parseAll)

+            return True

+        except ParseBaseException:

+            return False

+                

    def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
        """
        Execute the parse expression on a series of test strings, showing each
        test, the parsed results or where the parse failed. Quick and easy way to
        run a parse expression against a list of sample strings.

        Parameters:
         - tests - a list of separate test strings, or a multiline string of test strings
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test
              string; pass None to disable comment filtering
         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
              if False, only dump nested list
         - printResults - (default=C{True}) prints test output to stdout
         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing

        Returns: a (success, results) tuple, where success indicates that all tests succeeded
        (or failed if C{failureTests} is True), and the results contain a list of lines of each
        test's output.

        Each test string must be on a single line; embed C{r'\\n'} (in a raw
        string literal) to test multiline input.
        """
        # a multiline string becomes a list of stripped lines
        if isinstance(tests, basestring):
            tests = list(map(str.strip, tests.rstrip().splitlines()))
        if isinstance(comment, basestring):
            comment = Literal(comment)
        allResults = []
        comments = []          # comment lines accumulated for the next test
        success = True
        for t in tests:
            # comment lines (and blank lines following comments) are buffered
            # and echoed above the next real test
            if comment is not None and comment.matches(t, False) or comments and not t:
                comments.append(t)
                continue
            if not t:
                continue
            out = ['\n'.join(comments), t]
            comments = []
            try:
                # only convert escaped newlines in actual test lines
                t = t.replace(r'\n','\n')
                result = self.parseString(t, parseAll=parseAll)
                out.append(result.dump(full=fullDump))
                # a parse success counts as failure when failureTests is set
                success = success and not failureTests
            except ParseBaseException as pe:
                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
                if '\n' in t:
                    # multiline input: show the offending line, then the caret
                    out.append(line(pe.loc, t))
                    out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
                else:
                    out.append(' '*pe.loc + '^' + fatal)
                out.append("FAIL: " + str(pe))
                success = success and failureTests
                result = pe
            except Exception as exc:
                # non-parse exceptions are reported but don't abort the run
                out.append("FAIL-EXCEPTION: " + str(exc))
                success = success and failureTests
                result = exc

            if printResults:
                if fullDump:
                    out.append('')
                print('\n'.join(out))

            allResults.append((t, result))
        
        return success, allResults

+

+        

class Token(ParserElement):
    """
    Abstract C{ParserElement} subclass, for defining atomic matching patterns.
    Concrete subclasses (Literal, Keyword, Word, ...) implement parseImpl.
    """
    def __init__( self ):
        # tokens never need list-style result accumulation
        super(Token,self).__init__( savelist=False )

+

+

class Empty(Token):
    """
    An empty token; always matches, consuming no input.
    """
    def __init__(self):
        super(Empty, self).__init__()
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.name = "Empty"

+

+

class NoMatch(Token):
    """
    A token that will never match, at any position.
    """
    def __init__(self):
        super(NoMatch, self).__init__()
        self.name = "NoMatch"
        self.errmsg = "Unmatchable token"
        self.mayReturnEmpty = True
        self.mayIndexError = False

    def parseImpl(self, instring, loc, doActions=True):
        # unconditional failure
        raise ParseException(instring, loc, self.errmsg, self)

+

+

class Literal(Token):
    """
    Token to exactly match a specified string.

    Example::
        Literal('blah').parseString('blah')  # -> ['blah']
        Literal('blah').parseString('blahfooblah')  # -> ['blah']
        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"

    For case-insensitive matching, use L{CaselessLiteral}.

    For keyword matching (force word break before and after the matched string),
    use L{Keyword} or L{CaselessKeyword}.
    """
    def __init__(self, matchString):
        super(Literal, self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            # empty literals degrade into Empty (which always matches)
            warnings.warn("null string passed to Literal; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False

    # Performance tuning: this routine gets called a *lot*; check the first
    # character before doing any substring work, and skip startswith entirely
    # for single-character literals.
    def parseImpl(self, instring, loc, doActions=True):
        if instring[loc] != self.firstMatchChar:
            raise ParseException(instring, loc, self.errmsg, self)
        if self.matchLen == 1 or instring.startswith(self.match, loc):
            return loc + self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement._literalStringClass = Literal

+

class Keyword(Token):
    """
    Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character.  Compare with C{L{Literal}}:
     - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
     - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
    Accepts two optional constructor arguments in addition to the keyword string:
     - C{identChars} is a string of characters that would be valid identifier characters,
          defaulting to all alphanumerics + "_" and "$"
     - C{caseless} allows case-insensitive matching, default is C{False}.

    Example::
        Keyword("start").parseString("start")  # -> ['start']
        Keyword("start").parseString("starting")  # -> Exception

    For case-insensitive matching, use L{CaselessKeyword}.
    """
    # class-level default; per-instance identChars may override
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"

    def __init__( self, matchString, identChars=None, caseless=False ):
        super(Keyword,self).__init__()
        if identChars is None:
            identChars = Keyword.DEFAULT_KEYWORD_CHARS
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            # unlike Literal, an empty Keyword is warned about but not
            # converted to Empty (pre-existing behavior)
            warnings.warn("null string passed to Keyword; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # compare uppercased input against uppercased keyword/ident chars
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = set(identChars)

    def parseImpl( self, instring, loc, doActions=True ):
        # A match requires the keyword text itself, plus word boundaries on
        # both sides: the characters just before and just after the keyword
        # must not be identifier characters (or be out of range).
        if self.caseless:
            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
                return loc+self.matchLen, self.match
        else:
            if (instring[loc] == self.firstMatchChar and
                (self.matchLen==1 or instring.startswith(self.match,loc)) and
                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
                (loc == 0 or instring[loc-1] not in self.identChars) ):
                return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)

    def copy(self):
        # copies revert to the class-wide default identifier characters
        c = super(Keyword,self).copy()
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c

    @staticmethod
    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars

+

class CaselessLiteral(Literal):
    """
    Token to match a specified string, ignoring case of letters.
    Note: the matched results will always be in the case of the given
    match string, NOT the case of the input text.

    Example::
        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']

    (Contrast with example for L{CaselessKeyword}.)
    """
    def __init__(self, matchString):
        # store the uppercased form for comparison ...
        super(CaselessLiteral, self).__init__(matchString.upper())
        # ... but preserve the defining literal as the returned token text
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name

    def parseImpl(self, instring, loc, doActions=True):
        candidate = instring[loc:loc + self.matchLen]
        if candidate.upper() == self.match:
            return loc + self.matchLen, self.returnString
        raise ParseException(instring, loc, self.errmsg, self)

+

class CaselessKeyword(Keyword):
    """
    Caseless version of L{Keyword}.

    Example::
        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']

    (Contrast with example for L{CaselessLiteral}.)
    """
    def __init__(self, matchString, identChars=None):
        super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True)

    def parseImpl(self, instring, loc, doActions=True):
        # NOTE: unlike Keyword.parseImpl, only the trailing boundary is
        # checked here (pre-existing behavior, preserved)
        if instring[loc:loc + self.matchLen].upper() == self.caselessmatch:
            if loc >= len(instring) - self.matchLen or instring[loc + self.matchLen].upper() not in self.identChars:
                return loc + self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)

+

class CloseMatch(Token):
    """
    A variation on L{Literal} which matches "close" matches, that is, 
    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
     - C{match_string} - string to be matched
     - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match

    The results from a successful parse will contain the matched text from the input string and the following named results:
     - C{mismatches} - a list of the positions within the match_string where mismatches were found
     - C{original} - the original match_string used to compare against the input string

    If C{mismatches} is an empty list, then the match was an exact match.

    Example::
        patt = CloseMatch("ATCATCGAATGGA")
        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)

        # exact match
        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})

        # close match allowing up to 2 mismatches
        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
    """
    def __init__(self, match_string, maxMismatches=1):
        super(CloseMatch,self).__init__()
        self.name = match_string
        self.match_string = match_string
        self.maxMismatches = maxMismatches
        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
        self.mayIndexError = False
        self.mayReturnEmpty = False

    def parseImpl( self, instring, loc, doActions=True ):
        start = loc
        instrlen = len(instring)
        maxloc = start + len(self.match_string)

        # only attempt a match if enough input remains for the full string
        if maxloc <= instrlen:
            match_string = self.match_string
            match_stringloc = 0
            mismatches = []
            maxMismatches = self.maxMismatches

            # compare character-by-character, bailing out via `break` as soon
            # as the mismatch budget is exceeded
            for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
                src,mat = s_m
                if src != mat:
                    mismatches.append(match_stringloc)
                    if len(mismatches) > maxMismatches:
                        break
            else:
                # for/else: loop ran to completion, so the match is close enough
                loc = match_stringloc + 1
                results = ParseResults([instring[start:loc]])
                results['original'] = self.match_string
                results['mismatches'] = mismatches
                return loc, results

        raise ParseException(instring, loc, self.errmsg, self)

+

+

class Word(Token):
    """
    Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction. An optional
    C{excludeChars} parameter can list characters that might be found in
    the input C{bodyChars} string; useful to define a word of all printables
    except for one or two characters, for instance.

    L{srange} is useful for defining custom character set strings for defining
    C{Word} expressions, using range notation from regular expression character sets.

    A common mistake is to use C{Word} to match a specific literal string, as in
    C{Word("Address")}. Remember that C{Word} uses the string argument to define
    I{sets} of matchable characters. This expression would match "Add", "AAA",
    "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
    To match an exact literal string, use L{Literal} or L{Keyword}.

    pyparsing includes helper strings for building Words:
     - L{alphas}
     - L{nums}
     - L{alphanums}
     - L{hexnums}
     - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
     - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
     - L{printables} (any non-whitespace character)

    Example::
        # a word composed of digits
        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))

        # a word with a leading capital, and zero or more lowercase
        capital_word = Word(alphas.upper(), alphas.lower())

        # hostnames are alphanumeric, with leading alpha, and '-'
        hostname = Word(alphas, alphanums+'-')

        # roman numeral (not a strict parser, accepts invalid mix of characters)
        roman = Word("IVXLCDM")

        # any string of non-whitespace characters, except for ','
        csv_value = Word(printables, excludeChars=",")
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
        super(Word,self).__init__()
        # excludeChars is applied by subtraction from both character sets
        # before anything else is derived from them
        if excludeChars:
            initChars = ''.join(c for c in initChars if c not in excludeChars)
            if bodyChars:
                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
        # keep both the original strings (for repr/regex building) and
        # set() versions (for O(1) membership tests in parseImpl)
        self.initCharsOrig = initChars
        self.initChars = set(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = set(bodyChars)
        else:
            # body defaults to the initial character set
            self.bodyCharsOrig = initChars
            self.bodyChars = set(initChars)

        # remember whether the caller gave an explicit max, so parseImpl can
        # reject a match that is followed by more body characters
        self.maxSpecified = max > 0

        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        # exact overrides both min and max
        if exact > 0:
            self.maxLen = exact
            self.minLen = exact

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.asKeyword = asKeyword

        # Fast path: for the default lengths (min=1, max=0, exact=0) and
        # space-free character sets, precompile an equivalent regex and let
        # the re module do the scanning in parseImpl.
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.initCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                      (re.escape(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                      (_escapeRegexRangeChars(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile( self.reString )
            except Exception:
                # fall back to the character-by-character path below
                self.re = None
        # NOTE(review): when the fast-path condition above is false, self.re is
        # never assigned here, yet parseImpl reads it unconditionally —
        # presumably a default is supplied elsewhere; verify before relying on it.

    def parseImpl( self, instring, loc, doActions=True ):
        # regex fast path (see __init__)
        if self.re:
            result = self.re.match(instring,loc)
            if not result:
                raise ParseException(instring, loc, self.errmsg, self)

            loc = result.end()
            return loc, result.group()

        # slow path: explicit character-set scan
        if not(instring[ loc ] in self.initChars):
            raise ParseException(instring, loc, self.errmsg, self)

        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        # never scan past maxLen characters or the end of the input
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1

        throwException = False
        if loc - start < self.minLen:
            throwException = True
        # with an explicit max, the word must not continue past maxLen
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            throwException = True
        # asKeyword: require non-word characters (or string edges) on both sides
        if self.asKeyword:
            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
                throwException = True

        if throwException:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]

    def __str__( self ):
        # prefer the base-class name/repr when available
        try:
            return super(Word,self).__str__()
        except Exception:
            pass


        if self.strRepr is None:

            # abbreviate long character sets for readability
            def charsAsStr(s):
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s

            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)

        return self.strRepr

+

+

class Regex(Token):
    r"""
    Token for matching strings that match a given regular expression.
    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
    named parse results.

    Example::
        realnum = Regex(r"[+-]?\d+\.\d*")
        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
    """
    # type of a compiled regex object, used below to accept pre-compiled
    # patterns in addition to pattern strings
    compiledREtype = type(re.compile("[A-Z]"))
    def __init__( self, pattern, flags=0):
        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
        super(Regex,self).__init__()

        # basestring: this module retains Python 2 compatibility
        if isinstance(pattern, basestring):
            if not pattern:
                warnings.warn("null string passed to Regex; use Empty() instead",
                        SyntaxWarning, stacklevel=2)

            self.pattern = pattern
            self.flags = flags

            try:
                self.re = re.compile(self.pattern, self.flags)
                self.reString = self.pattern
            except sre_constants.error:
                # warn with the offending pattern, then let the caller see
                # the original compile error
                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                    SyntaxWarning, stacklevel=2)
                raise

        elif isinstance(pattern, Regex.compiledREtype):
            # pre-compiled regex: use as-is; flags argument is recorded but
            # cannot be applied to an already-compiled pattern
            self.re = pattern
            self.pattern = \
            self.reString = str(pattern)
            self.flags = flags

        else:
            raise ValueError("Regex may only be constructed with a string or a compiled RE object")

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        # anchor the match at loc; re.match only ever matches at its start pos
        result = self.re.match(instring,loc)
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        # expose any named groups as named parse results
        d = result.groupdict()
        ret = ParseResults(result.group())
        if d:
            for k in d:
                ret[k] = d[k]
        return loc,ret

    def __str__( self ):
        try:
            return super(Regex,self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)

        return self.strRepr

+

+

class QuotedString(Token):
    r"""
    Token for matching strings that are delimited by quoting characters.

    Defined with the following parameters:
        - quoteChar - string of one or more characters defining the quote delimiting string
        - escChar - character to escape quotes, typically backslash (default=C{None})
        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})

    Example::
        qs = QuotedString('"')
        print(qs.searchString('lsjdf "This is the quote" sldjf'))
        complex_qs = QuotedString('{{', endQuoteChar='}}')
        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
        sql_qs = QuotedString('"', escQuote='""')
        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
    prints::
        [['This is the quote']]
        [['This is the "quote"']]
        [['This is the quote with "embedded" quotes']]
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
        super(QuotedString,self).__init__()

        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if not quoteChar:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()

        # end quote defaults to the opening quote
        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if not endQuoteChar:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()

        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        # first character is used as a cheap pre-test in parseImpl
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        self.convertWhitespaceEscapes = convertWhitespaceEscapes

        # Build a single regex for the whole quoted string.  The body is an
        # alternation of: any char that is not (escape char / start of the end
        # quote / [newline in single-line mode]) ...
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            self.flags = 0
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        # ... or a proper prefix of a multi-character end quote ...
        if len(self.endQuoteChar) > 1:
            self.pattern += (
                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
                )
        # ... or an escaped-quote sequence / escaped character
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))

        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning, stacklevel=2)
            raise

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        # cheap single-character pre-test before running the full regex
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        ret = result.group()

        if self.unquoteResults:

            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]

            if isinstance(ret,basestring):
                # replace escaped whitespace
                if '\\' in ret and self.convertWhitespaceEscapes:
                    ws_map = {
                        r'\t' : '\t',
                        r'\n' : '\n',
                        r'\f' : '\f',
                        r'\r' : '\r',
                    }
                    for wslit,wschar in ws_map.items():
                        ret = ret.replace(wslit, wschar)

                # replace escaped characters
                # BUGFIX: the replacement must be a raw string; "\g<1>" in a
                # non-raw literal is an invalid escape sequence (deprecated,
                # an error in future Python versions)
                if self.escChar:
                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)

                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)

        return loc, ret

    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)

        return self.strRepr

+

+

class CharsNotIn(Token):
    """
    Token for matching words composed of characters I{not} in a given set (will
    include whitespace in matched characters if not listed in the provided exclusion set - see example).
    Defined with string containing all disallowed characters, and an optional
    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.

    Example::
        # define a comma-separated-value as anything that is not a ','
        csv_value = CharsNotIn(',')
        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
    prints::
        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
    """
    def __init__( self, notChars, min=1, max=0, exact=0 ):
        super(CharsNotIn, self).__init__()
        # significant whitespace: do not skip leading whitespace, it may be
        # part of the matched run
        self.skipWhitespace = False
        self.notChars = notChars

        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")

        self.minLen = min
        self.maxLen = max if max > 0 else _MAX_INT

        # an exact length overrides both bounds
        if exact > 0:
            self.minLen = self.maxLen = exact

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = ( self.minLen == 0 )
        self.mayIndexError = False

    def parseImpl( self, instring, loc, doActions=True ):
        # fail immediately on a disallowed first character
        if instring[loc] in self.notChars:
            raise ParseException(instring, loc, self.errmsg, self)

        start = loc
        loc += 1
        # scan forward while characters stay outside the excluded set,
        # bounded by maxLen and the end of the input
        limit = min(start + self.maxLen, len(instring))
        while loc < limit and instring[loc] not in self.notChars:
            loc += 1

        # enforce the minimum run length
        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]

    def __str__( self ):
        try:
            return super(CharsNotIn, self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            # abbreviate long exclusion sets for readability
            if len(self.notChars) > 4:
                self.strRepr = "!W:(%s...)" % self.notChars[:4]
            else:
                self.strRepr = "!W:(%s)" % self.notChars

        return self.strRepr

+

class White(Token):
    """
    Special matching class for matching whitespace.  Normally, whitespace is ignored
    by pyparsing grammars.  This class is included when some whitespace structures
    are significant.  Define with a string containing the whitespace characters to be
    matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
    as defined for the C{L{Word}} class.
    """
    # printable names for the recognized whitespace characters; a character in
    # ws outside this table would raise KeyError when the name is built below
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White,self).__init__()
        self.matchWhite = ws
        # whatever we are matching must not also be skipped as whitespace
        self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
        #~ self.leaveWhitespace()
        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        # exact overrides both min and max
        if exact > 0:
            self.maxLen = exact
            self.minLen = exact

    def parseImpl( self, instring, loc, doActions=True ):
        # first character must be one of the designated whitespace chars
        if not(instring[ loc ] in self.matchWhite):
            raise ParseException(instring, loc, self.errmsg, self)
        start = loc
        loc += 1
        # consume up to maxLen whitespace characters, bounded by end of input
        maxloc = start + self.maxLen
        maxloc = min( maxloc, len(instring) )
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1

        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]

+

+

class _PositionToken(Token):
    """Common base for zero-width tokens that assert a position in the
    input (line/string start or end, word boundaries) without consuming
    any characters."""
    def __init__( self ):
        super(_PositionToken, self).__init__()
        self.name = self.__class__.__name__
        # position assertions consume nothing and never index past the end
        self.mayIndexError = False
        self.mayReturnEmpty = True

+

class GoToColumn(_PositionToken):
    """
    Token to advance to a specific column of input text; useful for tabular report scraping.
    """
    def __init__( self, colno ):
        super(GoToColumn,self).__init__()
        # target (1-based) column to advance to
        self.col = colno

    def preParse( self, instring, loc ):
        # skip ignorables and whitespace until the target column is reached
        # (or the whitespace run / input ends)
        if col(loc,instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables( instring, loc )
            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
                loc += 1
        return loc

    def parseImpl( self, instring, loc, doActions=True ):
        thiscol = col( loc, instring )
        # already past the target column: cannot advance backwards
        if thiscol > self.col:
            raise ParseException( instring, loc, "Text not in expected column", self )
        # return the skipped-over text as the token value
        newloc = loc + self.col - thiscol
        ret = instring[ loc: newloc ]
        return newloc, ret

+

+

class LineStart(_PositionToken):
    """
    Matches if current position is at the beginning of a line within the parse string

    Example::

        test = '''\
        AAA this line
        AAA and this line
          AAA but not this one
        B AAA and definitely not this one
        '''

        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
            print(t)

    Prints::
        ['AAA', ' this line']
        ['AAA', ' and this line']

    """
    def __init__( self ):
        super(LineStart, self).__init__()
        self.errmsg = "Expected start of line"

    def parseImpl( self, instring, loc, doActions=True ):
        # succeed (consuming nothing) only when loc sits in column 1
        if col(loc, instring) != 1:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

+

class LineEnd(_PositionToken):
    """
    Matches if current position is at the end of a line within the parse string
    """
    def __init__( self ):
        super(LineEnd, self).__init__()
        # newline must remain visible to this token, so drop it from the
        # characters that are skipped as whitespace
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected end of line"

    def parseImpl( self, instring, loc, doActions=True ):
        # end of input also counts as end of line (empty token value)
        if loc == len(instring):
            return loc+1, []
        # otherwise a literal newline must be present at loc
        if loc < len(instring) and instring[loc] == "\n":
            return loc+1, "\n"
        raise ParseException(instring, loc, self.errmsg, self)

+

class StringStart(_PositionToken):
    """
    Matches if current position is at the beginning of the parse string
    """
    def __init__( self ):
        super(StringStart, self).__init__()
        self.errmsg = "Expected start of text"

    def parseImpl( self, instring, loc, doActions=True ):
        # accept loc > 0 only when everything before it is whitespace or
        # ignorable (i.e. preParse from 0 would land exactly here)
        if loc and loc != self.preParse( instring, 0 ):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

+

class StringEnd(_PositionToken):
    """
    Matches if current position is at the end of the parse string
    """
    def __init__( self ):
        super(StringEnd, self).__init__()
        self.errmsg = "Expected end of text"

    def parseImpl( self, instring, loc, doActions=True ):
        end = len(instring)
        # any remaining input means we are not at the end
        if loc < end:
            raise ParseException(instring, loc, self.errmsg, self)
        # exactly at the end: advance past it so repeated matches don't loop
        if loc == end:
            return loc+1, []
        # already past the end: succeed without moving
        return loc, []

+

class WordStart(_PositionToken):
    r"""
    Matches if the current position is at the beginning of a Word, and
    is not preceded by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
    the string being parsed, or at the beginning of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordStart, self).__init__()
        self.wordChars = set(wordChars)
        self.errmsg = "Not at the start of a word"

    def parseImpl(self, instring, loc, doActions=True ):
        # position 0 is always a word start; elsewhere require a non-word
        # character behind us and a word character at the cursor
        if loc != 0:
            at_word_start = (instring[loc-1] not in self.wordChars
                             and instring[loc] in self.wordChars)
            if not at_word_start:
                raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

+

class WordEnd(_PositionToken):
    r"""
    Matches if the current position is at the end of a Word, and
    is not followed by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
    the string being parsed, or at the end of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordEnd, self).__init__()
        self.wordChars = set(wordChars)
        # whitespace after a word is part of what we are asserting about;
        # do not skip it before testing
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"

    def parseImpl(self, instring, loc, doActions=True ):
        # end of (non-empty) input always qualifies; elsewhere require a word
        # character behind us and a non-word character at the cursor
        instrlen = len(instring)
        if instrlen > 0 and loc < instrlen:
            at_word_end = (instring[loc] not in self.wordChars
                           and instring[loc-1] in self.wordChars)
            if not at_word_end:
                raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

+

+

class ParseExpression(ParserElement):
    """
    Abstract subclass of ParserElement, for combining and post-processing parsed tokens.

    Concrete subclasses (C{And}, C{Or}, C{MatchFirst}, ...) combine the
    expressions held in C{self.exprs}.
    """
    def __init__( self, exprs, savelist = False ):
        """C{exprs} may be a single expression, a string, a generator, or any
        iterable of expressions/strings; strings are wrapped as literals."""
        super(ParseExpression,self).__init__(savelist)
        # BUGFIX: the Iterable ABC lives in collections.abc since Python 3.3
        # and the collections.Iterable alias was removed in 3.10; fall back
        # for Python 2 compatibility.
        try:
            from collections.abc import Iterable
        except ImportError:
            from collections import Iterable
        if isinstance( exprs, _generatorType ):
            exprs = list(exprs)

        if isinstance( exprs, basestring ):
            # a bare string becomes a single literal expression
            self.exprs = [ ParserElement._literalStringClass( exprs ) ]
        elif isinstance( exprs, Iterable ):
            exprs = list(exprs)
            # if sequence of strings provided, wrap with Literal
            if all(isinstance(expr, basestring) for expr in exprs):
                exprs = map(ParserElement._literalStringClass, exprs)
            self.exprs = list(exprs)
        else:
            # last resort: try to iterate, else treat as a single expression
            try:
                self.exprs = list( exprs )
            except TypeError:
                self.exprs = [ exprs ]
        self.callPreparse = False

    def __getitem__( self, i ):
        return self.exprs[i]

    def append( self, other ):
        """Add another contained expression; invalidates the cached repr."""
        self.exprs.append( other )
        self.strRepr = None
        return self

    def leaveWhitespace( self ):
        """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
           all contained expressions."""
        self.skipWhitespace = False
        # copy before mutating so shared sub-expressions are not affected
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self

    def ignore( self, other ):
        # propagate the new ignorable to all contained expressions
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self

    def __str__( self ):
        try:
            return super(ParseExpression,self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr

    def streamline( self ):
        super(ParseExpression,self).streamline()

        for e in self.exprs:
            e.streamline()

        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError

            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError

        self.errmsg = "Expected " + _ustr(self)

        return self

    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret

    def validate( self, validateTrace=[] ):
        # NOTE: the mutable default is safe here - validateTrace is only
        # copied, never mutated
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )

    def copy(self):
        # deep-copy contained expressions so the copy is independent
        ret = super(ParseExpression,self).copy()
        ret.exprs = [e.copy() for e in self.exprs]
        return ret

+

class And(ParseExpression):
    """
    Requires all given C{ParseExpression}s to be found in the given order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'+'} operator.
    May also be constructed using the C{'-'} operator, which will suppress backtracking.

    Example::
        integer = Word(nums)
        name_expr = OneOrMore(Word(alphas))

        expr = And([integer("id"),name_expr("name"),integer("age")])
        # more easily written as:
        expr = integer("id") + name_expr("name") + integer("age")
    """

    # zero-width marker inserted by the '-' operator; everything after it must
    # match or a non-backtrackable ParseSyntaxException is raised
    class _ErrorStop(Empty):
        def __init__(self, *args, **kwargs):
            super(And._ErrorStop,self).__init__(*args, **kwargs)
            self.name = '-'
            self.leaveWhitespace()

    def __init__( self, exprs, savelist = True ):
        super(And,self).__init__(exprs, savelist)
        # an And can match empty only if every element can
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        # adopt the whitespace behavior of the first element
        self.setWhitespaceChars( self.exprs[0].whiteChars )
        self.skipWhitespace = self.exprs[0].skipWhitespace
        self.callPreparse = True

    def parseImpl( self, instring, loc, doActions=True ):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
        errorStop = False
        for e in self.exprs[1:]:
            if isinstance(e, And._ErrorStop):
                # once the marker is seen, failures below stop backtracking
                errorStop = True
                continue
            if errorStop:
                try:
                    loc, exprtokens = e._parse( instring, loc, doActions )
                except ParseSyntaxException:
                    raise
                except ParseBaseException as pe:
                    # promote to ParseSyntaxException so enclosing
                    # alternatives do not retry past this point
                    pe.__traceback__ = None
                    raise ParseSyntaxException._from_exception(pe)
                except IndexError:
                    raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
            else:
                loc, exprtokens = e._parse( instring, loc, doActions )
            # accumulate tokens, keeping named-only (empty but keyed) results
            if exprtokens or exprtokens.haskeys():
                resultlist += exprtokens
        return loc, resultlist

    def __iadd__(self, other ):
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        return self.append( other ) #And( [ self, other ] )

    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
            # only elements reachable at the current position can recurse;
            # stop at the first element that must consume input
            if not e.mayReturnEmpty:
                break

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"

        return self.strRepr

+

+

class Or(ParseExpression):
    """
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the expression that matches the longest string will be used.
    May be constructed using the C{'^'} operator.

    Example::
        # construct Or using '^' operator

        number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789"))
    prints::
        [['123'], ['3.1416'], ['789']]
    """
    def __init__( self, exprs, savelist = False ):
        super(Or,self).__init__(exprs, savelist)
        # an Or with no alternatives trivially "matches" empty input
        if self.exprs:
            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
        else:
            self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        # First pass: tryParse (no parse actions) every alternative to see how
        # far each reaches; track the deepest failure for error reporting.
        maxExcLoc = -1
        maxException = None
        matches = []
        for e in self.exprs:
            try:
                loc2 = e.tryParse( instring, loc )
            except ParseException as err:
                err.__traceback__ = None  # drop traceback reference to save memory
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
            else:
                # save match among all matches, to retry longest to shortest
                matches.append((loc2, e))

        # Second pass: re-parse candidates (with actions) longest-first; a
        # parse action may reject here, in which case try the next candidate.
        if matches:
            matches.sort(key=lambda x: -x[0])
            for _,e in matches:
                try:
                    return e._parse( instring, loc, doActions )
                except ParseException as err:
                    err.__traceback__ = None
                    if err.loc > maxExcLoc:
                        maxException = err
                        maxExcLoc = err.loc

        if maxException is not None:
            # report the failure that got furthest into the input
            maxException.msg = self.errmsg
            raise maxException
        else:
            raise ParseException(instring, loc, "no defined alternatives to match", self)


    def __ixor__(self, other ):
        # expr ^= other : append in place rather than building a nested Or
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        return self.append( other ) #Or( [ self, other ] )

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            # cache the generated representation
            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"

        return self.strRepr

    def checkRecursion( self, parseElementList ):
        # every alternative is reachable, so recurse into all of them
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )

+

+

class MatchFirst(ParseExpression):
    """
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the first one listed is the one that will match.
    May be constructed using the C{'|'} operator.

    Example::
        # construct MatchFirst using '|' operator

        # watch the order of expressions to match
        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]

        # put more selective expression first
        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
    """
    def __init__(self, exprs, savelist=False):
        super(MatchFirst, self).__init__(exprs, savelist)
        # an empty alternatives list trivially "matches" empty input
        self.mayReturnEmpty = (not self.exprs) or any(e.mayReturnEmpty for e in self.exprs)

    def parseImpl(self, instring, loc, doActions=True):
        # Try alternatives in listed order; the first success wins.  If all
        # fail, raise the exception that reached furthest into the input.
        deepestErr = None
        deepestLoc = -1
        for alt in self.exprs:
            try:
                return alt._parse(instring, loc, doActions)
            except ParseException as err:
                if err.loc > deepestLoc:
                    deepestErr = err
                    deepestLoc = err.loc
            except IndexError:
                if len(instring) > deepestLoc:
                    deepestErr = ParseException(instring, len(instring), alt.errmsg, self)
                    deepestLoc = len(instring)

        # no alternative matched
        if deepestErr is not None:
            deepestErr.msg = self.errmsg
            raise deepestErr
        raise ParseException(instring, loc, "no defined alternatives to match", self)

    def __ior__(self, other):
        """Implement ``expr |= other`` by appending in place to this MatchFirst."""
        if isinstance(other, basestring):
            other = ParserElement._literalStringClass(other)
        return self.append(other)

    def __str__(self):
        try:
            return self.name
        except AttributeError:
            pass
        if self.strRepr is None:
            # cache the generated representation
            self.strRepr = "{%s}" % " | ".join(_ustr(sub) for sub in self.exprs)
        return self.strRepr

    def checkRecursion(self, parseElementList):
        # every alternative is reachable, so recurse into all of them
        seen = parseElementList[:] + [self]
        for sub in self.exprs:
            sub.checkRecursion(seen)

+

+

class Each(ParseExpression):
    """
    Requires all given C{ParseExpression}s to be found, but in any order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'&'} operator.

    Example::
        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
        integer = Word(nums)
        shape_attr = "shape:" + shape_type("shape")
        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
        color_attr = "color:" + color("color")
        size_attr = "size:" + integer("size")

        # use Each (using operator '&') to accept attributes in any order
        # (shape and posn are required, color and size are optional)
        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)

        shape_spec.runTests('''
            shape: SQUARE color: BLACK posn: 100, 120
            shape: CIRCLE size: 50 color: BLUE posn: 50,80
            color:GREEN size:20 shape:TRIANGLE posn:20,40
            '''
            )
    prints::
        shape: SQUARE color: BLACK posn: 100, 120
        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
        - color: BLACK
        - posn: ['100', ',', '120']
          - x: 100
          - y: 120
        - shape: SQUARE


        shape: CIRCLE size: 50 color: BLUE posn: 50,80
        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
        - color: BLUE
        - posn: ['50', ',', '80']
          - x: 50
          - y: 80
        - shape: CIRCLE
        - size: 50


        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
        - color: GREEN
        - posn: ['20', ',', '40']
          - x: 20
          - y: 40
        - shape: TRIANGLE
        - size: 20
    """
    def __init__( self, exprs, savelist = True ):
        super(Each,self).__init__(exprs, savelist)
        # Each matches empty only if every sub-expression can match empty
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        self.skipWhitespace = True
        # classification of sub-expressions is deferred to the first parse
        self.initExprGroups = True

    def parseImpl( self, instring, loc, doActions=True ):
        # Lazily classify sub-expressions by optionality/repetition; computed
        # once and cached, since self.exprs does not change afterwards.
        if self.initExprGroups:
            # map id(Optional.expr) back to its Optional wrapper
            self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
            self.optionals = opt1 + opt2
            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt  = self.optionals[:]
        matchOrder = []

        # Greedily match remaining candidates until a full pass over them makes
        # no progress; required/optional items are consumed once, multi-match
        # (ZeroOrMore/OneOrMore) items may keep matching.
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse( instring, tmpLoc )
                except ParseException:
                    failed.append(e)
                else:
                    matchOrder.append(self.opt1map.get(id(e),e))
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False

        # any required element left unmatched is an error
        if tmpReqd:
            missing = ", ".join(_ustr(e) for e in tmpReqd)
            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )

        # add any unmatched Optionals, in case they have default values defined
        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]

        # Re-parse in the discovered order, this time running parse actions.
        resultlist = []
        for e in matchOrder:
            loc,results = e._parse(instring,loc,doActions)
            resultlist.append(results)

        finalResults = sum(resultlist, ParseResults([]))
        return loc, finalResults

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            # cache the generated representation
            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"

        return self.strRepr

    def checkRecursion( self, parseElementList ):
        # every element may come first, so recurse into all of them
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )

+

+

class ParseElementEnhance(ParserElement):
    """
    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.

    Wraps a single contained expression (``self.expr``), delegates parsing to
    it by default, and copies its whitespace/parse-time settings.  Note that
    ``self.expr`` may be C{None} (e.g. an as-yet-undefined L{Forward}).
    """
    def __init__( self, expr, savelist=False ):
        super(ParseElementEnhance,self).__init__(savelist)
        if isinstance( expr, basestring ):
            # promote a bare string to the configured literal class
            if issubclass(ParserElement._literalStringClass, Token):
                expr = ParserElement._literalStringClass(expr)
            else:
                expr = ParserElement._literalStringClass(Literal(expr))
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            # inherit the contained expression's parse-time characteristics
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars( expr.whiteChars )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)

    def parseImpl( self, instring, loc, doActions=True ):
        # delegate to the contained expression; fail outright if undefined
        if self.expr is not None:
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        else:
            raise ParseException("",loc,self.errmsg,self)

    def leaveWhitespace( self ):
        self.skipWhitespace = False
        # BUGFIX: previously self.expr.copy() was called *before* the None
        # check, raising AttributeError when self.expr is None (e.g. an
        # undefined Forward).  Copy and recurse only when an expr is present.
        if self.expr is not None:
            self.expr = self.expr.copy()
            self.expr.leaveWhitespace()
        return self

    def ignore( self, other ):
        # Suppress-wrapped ignorables are deduplicated; anything else is
        # always forwarded to the contained expression as well.
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseElementEnhance, self).ignore( other )
                if self.expr is not None:
                    self.expr.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseElementEnhance, self).ignore( other )
            if self.expr is not None:
                self.expr.ignore( self.ignoreExprs[-1] )
        return self

    def streamline( self ):
        super(ParseElementEnhance,self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self

    def checkRecursion( self, parseElementList ):
        # seeing ourselves again means the grammar is left-recursive
        if self in parseElementList:
            raise RecursiveGrammarException( parseElementList+[self] )
        subRecCheckList = parseElementList[:] + [ self ]
        if self.expr is not None:
            self.expr.checkRecursion( subRecCheckList )

    def validate( self, validateTrace=[] ):
        # validateTrace is only read (copied), never mutated in place, so the
        # mutable default argument is safe here
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion( [] )

    def __str__( self ):
        try:
            return super(ParseElementEnhance,self).__str__()
        except Exception:
            pass

        if self.strRepr is None and self.expr is not None:
            # cache the generated representation
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
        return self.strRepr

+

+

class FollowedBy(ParseElementEnhance):
    """
    Lookahead matching of the given parse expression.  C{FollowedBy}
    does I{not} advance the parsing position within the input string, it only
    verifies that the specified parse expression matches at the current
    position.  C{FollowedBy} always returns a null token list.

    Example::
        # use FollowedBy to match a label only if it is followed by a ':'
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))

        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
    prints::
        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
    """
    def __init__(self, expr):
        super(FollowedBy, self).__init__(expr)
        # a pure lookahead consumes nothing, so it can always match "empty"
        self.mayReturnEmpty = True

    def parseImpl(self, instring, loc, doActions=True):
        # raises on failure; on success report no tokens and no advance
        self.expr.tryParse(instring, loc)
        return loc, []

+

+

class NotAny(ParseElementEnhance):
    """
    Lookahead to disallow matching with the given parse expression.  C{NotAny}
    does I{not} advance the parsing position within the input string, it only
    verifies that the specified parse expression does I{not} match at the current
    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
    always returns a null token list.  May be constructed using the '~' operator.
    """
    def __init__(self, expr):
        super(NotAny, self).__init__(expr)
        # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
        self.skipWhitespace = False
        # negative lookahead consumes nothing, so an "empty" match is fine
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, "+_ustr(self.expr)

    def parseImpl(self, instring, loc, doActions=True):
        # succeed (with no tokens) only when the wrapped expression fails here
        if self.expr.canParseNext(instring, loc):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

    def __str__(self):
        try:
            return self.name
        except AttributeError:
            pass
        if self.strRepr is None:
            # cache the generated representation
            self.strRepr = "~{%s}" % _ustr(self.expr)
        return self.strRepr

+

class _MultipleMatch(ParseElementEnhance):
    # Shared implementation for OneOrMore/ZeroOrMore: repeatedly match
    # self.expr, optionally stopping when the stopOn sentinel would match next.
    def __init__( self, expr, stopOn=None):
        super(_MultipleMatch, self).__init__(expr)
        self.saveAsList = True
        ender = stopOn
        if isinstance(ender, basestring):
            ender = ParserElement._literalStringClass(ender)
        # negative lookahead that fails whenever the sentinel is next
        self.not_ender = ~ender if ender is not None else None

    def parseImpl( self, instring, loc, doActions=True ):
        # hoist bound-method lookups out of the loop -- this is a hot path
        self_expr_parse = self.expr._parse
        self_skip_ignorables = self._skipIgnorables
        check_ender = self.not_ender is not None
        if check_ender:
            try_not_ender = self.not_ender.tryParse

        # must be at least one (but first see if we are the stopOn sentinel;
        # if so, fail)
        if check_ender:
            try_not_ender(instring, loc)
        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
        try:
            hasIgnoreExprs = (not not self.ignoreExprs)
            while 1:
                if check_ender:
                    try_not_ender(instring, loc)
                if hasIgnoreExprs:
                    preloc = self_skip_ignorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
                if tmptokens or tmptokens.haskeys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            # the repetition ends at the first failed match; everything
            # accumulated so far is the result
            pass

        return loc, tokens

+        

class OneOrMore(_MultipleMatch):
    """
    Repetition of one or more of the given expression.

    Parameters:
     - expr - expression that must match one or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition
          expression)

    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))

        text = "shape: SQUARE posn: upper left color: BLACK"
        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]

        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]

        # could also be written as
        (attr_expr * (1,)).parseString(text).pprint()
    """

    def __str__(self):
        try:
            return self.name
        except AttributeError:
            pass
        if self.strRepr is None:
            # cache the generated representation
            self.strRepr = "{%s}..." % _ustr(self.expr)
        return self.strRepr

+

class ZeroOrMore(_MultipleMatch):
    """
    Optional repetition of zero or more of the given expression.

    Parameters:
     - expr - expression that must match zero or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition
          expression)

    Example: similar to L{OneOrMore}
    """
    def __init__(self, expr, stopOn=None):
        super(ZeroOrMore, self).__init__(expr, stopOn=stopOn)
        # zero repetitions is a legal (empty) match
        self.mayReturnEmpty = True

    def parseImpl(self, instring, loc, doActions=True):
        # Delegate to _MultipleMatch; if not even one repetition matches,
        # succeed anyway without consuming input or producing tokens.
        try:
            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
        except (ParseException, IndexError):
            return loc, []

    def __str__(self):
        try:
            return self.name
        except AttributeError:
            pass
        if self.strRepr is None:
            # cache the generated representation
            self.strRepr = "[%s]..." % _ustr(self.expr)
        return self.strRepr

+

class _NullToken(object):
    """Falsy, empty-string placeholder used as Optional's 'no default' sentinel."""
    def __bool__(self):
        return False
    # Python 2 uses __nonzero__ for the truth protocol
    __nonzero__ = __bool__
    def __str__(self):
        return ""

# module-level singleton sentinel distinguishing "no default given" from
# any real default value (including None)
_optionalNotMatched = _NullToken()

class Optional(ParseElementEnhance):
    """
    Optional matching of the given expression.

    Parameters:
     - expr - expression that must match zero or more times
     - default (optional) - value to be returned if the optional expression is not found.

    Example::
        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
        zip.runTests('''
            # traditional ZIP code
            12345

            # ZIP+4 form
            12101-0001

            # invalid ZIP
            98765-
            ''')
    prints::
        # traditional ZIP code
        12345
        ['12345']

        # ZIP+4 form
        12101-0001
        ['12101-0001']

        # invalid ZIP
        98765-
             ^
        FAIL: Expected end of text (at char 5), (line:1, col:6)
    """
    def __init__( self, expr, default=_optionalNotMatched ):
        super(Optional,self).__init__( expr, savelist=False )
        self.saveAsList = self.expr.saveAsList
        # sentinel default means "no default supplied" (None is a valid default)
        self.defaultValue = default
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        except (ParseException,IndexError):
            # no match: substitute the default if one was given, else return
            # an empty token list; either way the parse succeeds at loc
            if self.defaultValue is not _optionalNotMatched:
                if self.expr.resultsName:
                    # preserve the wrapped expression's results name on the default
                    tokens = ParseResults([ self.defaultValue ])
                    tokens[self.expr.resultsName] = self.defaultValue
                else:
                    tokens = [ self.defaultValue ]
            else:
                tokens = []
        return loc, tokens

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            # cache the generated representation
            self.strRepr = "[" + _ustr(self.expr) + "]"

        return self.strRepr

+

class SkipTo(ParseElementEnhance):
    """
    Token for skipping over all undefined text until the matched expression is found.

    Parameters:
     - expr - target expression marking the end of the data to be skipped
     - include - (default=C{False}) if True, the target expression is also parsed
          (the skipped text and target expression are returned as a 2-element list).
     - ignore - (default=C{None}) used to define grammars (typically quoted strings and
          comments) that might contain false matches to the target expression
     - failOn - (default=C{None}) define expressions that are not allowed to be
          included in the skipped test; if found before the target expression is found,
          the SkipTo is not a match

    Example::
        report = '''
            Outstanding Issues Report - 1 Jan 2000

               # | Severity | Description                               |  Days Open
            -----+----------+-------------------------------------------+-----------
             101 | Critical | Intermittent system crash                 |          6
              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
              79 | Minor    | System slow when running too many reports |         47
            '''
        integer = Word(nums)
        SEP = Suppress('|')
        # use SkipTo to simply match everything up until the next SEP
        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
        # - parse action will call token.strip() for each matched token, i.e., the description body
        string_data = SkipTo(SEP, ignore=quotedString)
        string_data.setParseAction(tokenMap(str.strip))
        ticket_expr = (integer("issue_num") + SEP
                      + string_data("sev") + SEP
                      + string_data("desc") + SEP
                      + integer("days_open"))

        for tkt in ticket_expr.searchString(report):
            print tkt.dump()
    prints::
        ['101', 'Critical', 'Intermittent system crash', '6']
        - days_open: 6
        - desc: Intermittent system crash
        - issue_num: 101
        - sev: Critical
        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
        - days_open: 14
        - desc: Spelling error on Login ('log|n')
        - issue_num: 94
        - sev: Cosmetic
        ['79', 'Minor', 'System slow when running too many reports', '47']
        - days_open: 47
        - desc: System slow when running too many reports
        - issue_num: 79
        - sev: Minor
    """
    def __init__( self, other, include=False, ignore=None, failOn=None ):
        super( SkipTo, self ).__init__( other )
        self.ignoreExpr = ignore
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        if isinstance(failOn, basestring):
            # promote a bare string to the configured literal class
            self.failOn = ParserElement._literalStringClass(failOn)
        else:
            self.failOn = failOn
        self.errmsg = "No match found for "+_ustr(self.expr)

    def parseImpl( self, instring, loc, doActions=True ):
        startloc = loc
        instrlen = len(instring)
        expr = self.expr
        # hoist bound-method lookups out of the scan loop for speed
        expr_parse = self.expr._parse
        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None

        # Scan forward one character at a time until the target expression
        # matches; the while/else below fires only when we run off the end.
        tmploc = loc
        while tmploc <= instrlen:
            if self_failOn_canParseNext is not None:
                # break if failOn expression matches
                if self_failOn_canParseNext(instring, tmploc):
                    break

            if self_ignoreExpr_tryParse is not None:
                # advance past ignore expressions
                while 1:
                    try:
                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
                    except ParseBaseException:
                        break

            try:
                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
            except (ParseException, IndexError):
                # no match, advance loc in string
                tmploc += 1
            else:
                # matched skipto expr, done
                break

        else:
            # ran off the end of the input string without matching skipto expr, fail
            raise ParseException(instring, loc, self.errmsg, self)

        # build up return values
        loc = tmploc
        skiptext = instring[startloc:loc]
        skipresult = ParseResults(skiptext)

        if self.includeMatch:
            # re-parse the target for real (with actions) and append its tokens
            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
            skipresult += mat

        return loc, skipresult

+

class Forward(ParseElementEnhance):
    """
    Forward declaration of an expression to be defined later -
    used for recursive grammars, such as algebraic infix notation.
    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.

    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
    Specifically, '|' has a lower precedence than '<<', so that::
        fwdExpr << a | b | c
    will actually be evaluated as::
        (fwdExpr << a) | b | c
    thereby leaving b and c out as parseable alternatives.  It is recommended that you
    explicitly group the values inserted into the C{Forward}::
        fwdExpr << (a | b | c)
    Converting to use the '<<=' operator instead will avoid this problem.

    See L{ParseResults.pprint} for an example of a recursive parser created using
    C{Forward}.
    """
    def __init__( self, other=None ):
        # self.expr stays None until the grammar is attached via '<<'
        super(Forward,self).__init__( other, savelist=False )

    def __lshift__( self, other ):
        # fwd << expr : attach the real expression and adopt its settings
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass(other)
        self.expr = other
        self.strRepr = None
        self.mayIndexError = self.expr.mayIndexError
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        self.setWhitespaceChars( self.expr.whiteChars )
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        return self

    def __ilshift__(self, other):
        # fwd <<= expr : preferred spelling, avoids '<<' precedence surprises
        return self << other

    def leaveWhitespace( self ):
        # deliberately does NOT recurse into self.expr, which may be
        # self-referential in a recursive grammar
        self.skipWhitespace = False
        return self

    def streamline( self ):
        # the streamlined flag guards against infinite recursion on cyclic grammars
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self

    def validate( self, validateTrace=[] ):
        # validateTrace is only read (copied), never mutated, so the mutable
        # default argument is safe; the membership test breaks cycles
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        return self.__class__.__name__ + ": ..."

        # NOTE: everything below is intentionally unreachable --
        # stubbed out for now - creates awful memory and perf issues
        self._revertClass = self.__class__
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)
            else:
                retString = "None"
        finally:
            self.__class__ = self._revertClass
        return self.__class__.__name__ + ": " + retString

    def copy(self):
        if self.expr is not None:
            return super(Forward,self).copy()
        else:
            # not yet defined: return a new Forward that defers to this one,
            # so both resolve once the grammar is attached
            ret = Forward()
            ret <<= self
            return ret

+

class _ForwardNoRecurse(Forward):
    """Temporary stand-in class rendered as '...' to cut off recursive __str__ calls."""
    def __str__(self):
        return "..."

+

class TokenConverter(ParseElementEnhance):
    """
    Abstract subclass of C{ParseExpression}, for converting parsed results.
    """
    def __init__(self, expr, savelist=False):
        # savelist is accepted for signature compatibility but not forwarded
        super(TokenConverter, self).__init__(expr)
        self.saveAsList = False

+

class Combine(TokenConverter):
    """
    Converter to concatenate all matching tokens to a single string.
    By default, the matching patterns must also be contiguous in the input string;
    this can be disabled by specifying C{'adjacent=False'} in the constructor.

    Example::
        real = Word(nums) + '.' + Word(nums)
        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
        # will also erroneously match the following
        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']

        real = Combine(Word(nums) + '.' + Word(nums))
        print(real.parseString('3.1416')) # -> ['3.1416']
        # no match when there are internal spaces
        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
    """
    def __init__( self, expr, joinString="", adjacent=True ):
        super(Combine,self).__init__( expr )
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
        self.callPreparse = True

    def ignore( self, other ):
        # when tokens must be adjacent, register the ignorable only on the
        # Combine itself, not on the contained expression
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super( Combine, self).ignore( other )
        return self

    def postParse( self, instring, loc, tokenlist ):
        # replace the matched tokens with a single joined string, preserving
        # any named results carried on the copy
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)

        if self.resultsName and retToks.haskeys():
            # wrap so the named results survive result-name assignment
            return [ retToks ]
        else:
            return retToks

+

class Group(TokenConverter):
    """
    Converter wrapping the matched tokens in their own sub-list; handy for
    keeping the results of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions
    grouped together.

    Example::
        func = ident + Group(Optional(delimitedList(term)))
        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
    """
    def __init__( self, expr ):
        super(Group, self).__init__(expr)
        # keep grouped tokens as a nested list within the results
        self.saveAsList = True

    def postParse( self, instring, loc, tokenlist ):
        # wrap the entire token list in a single-element list
        return [ tokenlist ]

+

class Dict(TokenConverter):
    """
    Converter returning a repetitive expression as a list, while also
    registering each element under a key taken from the element's first
    token.  Useful for tabular report scraping, where the first column of
    each row can serve as the item key.

    See L{dictOf} for a simpler calling convention, and L{ParseResults}
    for examples of accessing fields by results name.
    """
    def __init__( self, expr ):
        super(Dict, self).__init__(expr)
        self.saveAsList = True

    def postParse( self, instring, loc, tokenlist ):
        for idx, group in enumerate(tokenlist):
            if not group:
                # empty sub-group - nothing to key on
                continue
            key = group[0]
            if isinstance(key, int):
                # normalize integer keys to stripped strings
                key = _ustr(group[0]).strip()
            nvals = len(group)
            if nvals == 1:
                # key only, with no accompanying value
                tokenlist[key] = _ParseResultsWithOffset("", idx)
            elif nvals == 2 and not isinstance(group[1], ParseResults):
                # simple key / scalar-value pair
                tokenlist[key] = _ParseResultsWithOffset(group[1], idx)
            else:
                # multiple values (or a structured value): keep everything
                # after the key, unwrapping a lone unnamed value
                value = group.copy()
                del value[0]
                if len(value) != 1 or (isinstance(value, ParseResults) and value.haskeys()):
                    tokenlist[key] = _ParseResultsWithOffset(value, idx)
                else:
                    tokenlist[key] = _ParseResultsWithOffset(value[0], idx)

        if self.resultsName:
            return [ tokenlist ]
        return tokenlist

+

+

class Suppress(TokenConverter):
    """
    Converter that matches its contained expression but omits the matched
    tokens from the parsed output.  Commonly used to drop punctuation and
    delimiters that matter during parsing but not afterward.

    Example::
        wd = Word(alphas)
        wd_list = wd + ZeroOrMore(Suppress(',') + wd)
        print(wd_list.parseString("a, b, c,d"))  # -> ['a', 'b', 'c', 'd']

    (See also L{delimitedList}.)
    """
    def postParse( self, instring, loc, tokenlist ):
        # discard everything that was matched
        return []

    def suppress( self ):
        # already suppressing - nothing more to do
        return self

+

+

class OnlyOnce(object):
    """
    Parse-action wrapper that lets the wrapped action fire only once; any
    later invocation raises a C{ParseException}.  Call L{reset} to re-arm.
    """
    def __init__(self, methodCall):
        # normalize the callable to the standard (s, l, t) signature
        self.callable = _trim_arity(methodCall)
        self.called = False
    def __call__(self,s,l,t):
        if self.called:
            raise ParseException(s,l,"")
        ret = self.callable(s,l,t)
        self.called = True
        return ret
    def reset(self):
        # allow the wrapped action to be invoked again
        self.called = False

+

def traceParseAction(f):
    """
    Decorator for debugging parse actions.

    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.

    Example::
        wd = Word(alphas)

        @traceParseAction
        def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))

        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
    prints::
        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
        <<leaving remove_duplicate_chars (ret: 'dfjkls')
        ['dfjkls']
    """
    f = _trim_arity(f)
    def z(*paArgs):
        # the last three positional args are always (s, l, t); a fourth
        # leading arg means f is a bound method - include its class name
        thisFunc = f.__name__
        s,l,t = paArgs[-3:]
        if len(paArgs)>3:
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
        try:
            ret = f(*paArgs)
        except Exception as exc:
            # report the exception, then let it propagate unchanged
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
        return ret
    try:
        # preserve the wrapped function's name for later diagnostics
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z

+

+#

+# global helpers

+#

def delimitedList( expr, delim=",", combine=False ):
    """
    Helper defining a list of one or more C{expr} separated by C{delim}
    (default ',').

    With C{combine=False} (the default) the delimiters are suppressed and
    the list elements are returned as individual tokens, possibly with
    intervening whitespace and comments.  With C{combine=True} the entire
    matched text, delimiters included, is returned as a single token.

    Example::
        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
    """
    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
    if combine:
        listExpr = Combine( expr + ZeroOrMore( delim + expr ) )
    else:
        listExpr = expr + ZeroOrMore( Suppress( delim ) + expr )
    return listExpr.setName(dlName)

+

def countedArray( expr, intExpr=None ):
    """
    Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    
    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.

    Example::
        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']

        # in this parser, the leading integer value is given in binary,
        # '10' indicating that 2 values are in the array
        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
    """
    # Forward that is redefined on the fly, once the leading count has been
    # parsed, to match exactly that many exprs
    arrayExpr = Forward()
    def countFieldParseAction(s,l,t):
        n = t[0]
        # a count of 0 matches the empty expression; returning [] drops the
        # count token itself from the results
        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
        return []
    if intExpr is None:
        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
    else:
        # copy so the caller's expression is not mutated below
        intExpr = intExpr.copy()
    intExpr.setName("arrayLen")
    # callDuringTry so the count is captured even during lookahead attempts
    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
    return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')

+

+def _flatten(L):

+    ret = []

+    for i in L:

+        if isinstance(i,list):

+            ret.extend(_flatten(i))

+        else:

+            ret.append(i)

+    return ret

+

def matchPreviousLiteral(expr):
    """
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
    If this is not desired, use C{matchPreviousExpr}.
    Do I{not} use with packrat parsing enabled.
    """
    rep = Forward()
    def copyTokenToRepeater(s,l,t):
        # redefine rep on the fly to literally match whatever expr just matched
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.asList())
                rep << And(Literal(tt) for tt in tflat)
        else:
            # expr matched no tokens - the repeat matches the empty string
            rep << Empty()
    # callDuringTry so the repeat target is captured even during lookaheads
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + _ustr(expr))
    return rep

+

def matchPreviousExpr(expr):
    """
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
    expressions, will I{not} match the leading C{"1:1"} in C{"1:10"};
    the expressions are evaluated first, and then compared, so
    C{"1"} is compared with C{"10"}.
    Do I{not} use with packrat parsing enabled.
    """
    rep = Forward()
    # rep re-parses with a copy of expr, then a parse action compares the
    # new tokens against those captured from the original match
    e2 = expr.copy()
    rep <<= e2
    def copyTokenToRepeater(s,l,t):
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s,l,t):
            theseTokens = _flatten(t.asList())
            if  theseTokens != matchTokens:
                # token mismatch - fail the repeat match
                raise ParseException("",0,"")
        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + _ustr(expr))
    return rep

+

def _escapeRegexRangeChars(s):
    """Escape characters in s that are special inside a regex [] range."""
    # backslash-escape each of: \ ^ - ]
    for special in r"\^-]":
        s = s.replace(special, _bslash + special)
    # spell whitespace characters out as literal escapes
    s = s.replace("\n", r"\n")
    s = s.replace("\t", r"\t")
    return _ustr(s)

+

def oneOf( strs, caseless=False, useRegex=True ):
    """
    Helper to quickly define a set of alternative Literals, and makes sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    but returns a C{L{MatchFirst}} for best performance.

    Parameters:
     - strs - a string of space-delimited literals, or a collection of string literals
     - caseless - (default=C{False}) - treat all literals as caseless
     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
          if creating a C{Regex} raises an exception)

    Example::
        comp_oper = oneOf("< = > <= >= !=")
        var = Word(alphas)
        number = Word(nums)
        term = var | number
        comparison_expr = term + comp_oper + term
        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
    prints::
        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
    """
    if caseless:
        isequal = ( lambda a,b: a.upper() == b.upper() )
        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
        parseElementClass = CaselessLiteral
    else:
        isequal = ( lambda a,b: a == b )
        masks = ( lambda a,b: b.startswith(a) )
        parseElementClass = Literal

    # collections.Iterable moved to collections.abc in Python 3.3 and was
    # removed from the collections namespace in 3.10; fall back for Python 2
    try:
        _Iterable = collections.abc.Iterable
    except AttributeError:
        _Iterable = collections.Iterable

    symbols = []
    if isinstance(strs,basestring):
        symbols = strs.split()
    elif isinstance(strs, _Iterable):
        symbols = list(strs)
    else:
        warnings.warn("Invalid argument to oneOf, expected string or iterable",
                SyntaxWarning, stacklevel=2)
    if not symbols:
        return NoMatch()

    # remove duplicates, and reorder so that any symbol that is a prefix of a
    # later symbol comes after it (longest-first matching)
    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j,other in enumerate(symbols[i+1:]):
            if ( isequal(other, cur) ):
                del symbols[i+j+1]
                break
            elif ( masks(cur, other) ):
                del symbols[i+j+1]
                symbols.insert(i,other)
                cur = other
                break
        else:
            i += 1

    if not caseless and useRegex:
        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
        try:
            # all-single-character symbols can collapse to a character class
            if len(symbols)==len("".join(symbols)):
                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
            else:
                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
        except Exception:
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                    SyntaxWarning, stacklevel=2)


    # last resort, just use MatchFirst
    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))

+

def dictOf( key, value ):
    """
    Helper for building a C{L{Dict}} from separate key and value patterns,
    taking care of the C{L{Dict}}, C{L{ZeroOrMore}} and C{L{Group}} nesting
    in the proper order.  The key pattern may include delimiting markers or
    punctuation, as long as they are suppressed; the value pattern may
    define results names, which the resulting C{Dict} entries will carry.

    Example::
        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        result = dictOf(attr_label, attr_value).parseString(text)
        print(result['shape'])   # -> 'SQUARE'
        print(result.shape)      # object attribute access works too
        print(result.asDict())
    """
    # each key/value pair is grouped, so Dict can key each group on its
    # first token
    return Dict(ZeroOrMore(Group(key + value)))

+

def originalTextFor(expr, asString=True):
    """
    Helper to return the original, untokenized text for a given expression.  Useful to
    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
    revert separate tokens with intervening whitespace back to the original matching
    input text. By default, returns a string containing the original parsed text.  
       
    If the optional C{asString} argument is passed as C{False}, then the return value is a 
    C{L{ParseResults}} containing any results names that were originally matched, and a 
    single token containing the original matched text from the input string.  So if 
    the expression passed to C{L{originalTextFor}} contains expressions with defined
    results names, you must set C{asString} to C{False} if you want to preserve those
    results name values.

    Example::
        src = "this is test <b> bold <i>text</i> </b> normal text "
        for tag in ("b","i"):
            opener,closer = makeHTMLTags(tag)
            patt = originalTextFor(opener + SkipTo(closer) + closer)
            print(patt.searchString(src)[0])
    prints::
        ['<b> bold <i>text</i> </b>']
        ['<i>text</i>']
    """
    # bracket expr with zero-width markers recording the start and end
    # locations of the match in the original input string
    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
    endlocMarker = locMarker.copy()
    endlocMarker.callPreparse = False
    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    if asString:
        extractText = lambda s,l,t: s[t._original_start:t._original_end]
    else:
        def extractText(s,l,t):
            # replace tokens in place; pop() removes the marker names so they
            # don't leak into the final results
            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
    matchExpr.setParseAction(extractText)
    # share expr's ignorables so whitespace/comment skipping stays consistent
    matchExpr.ignoreExprs = expr.ignoreExprs
    return matchExpr

+

def ungroup(expr):
    """
    Helper that strips one level of grouping from the results of C{expr},
    undoing pyparsing's default grouping of And expressions, even if all
    but one are non-empty.
    """
    # return the sole inner token in place of the grouped result
    return TokenConverter(expr).setParseAction(lambda tokens: tokens[0])

+

def locatedExpr(expr):
    """
    Helper to decorate a returned token with its starting and ending locations in the input string.
    This helper adds the following results names:
     - locn_start = location where matched expression begins
     - locn_end = location where matched expression ends
     - value = the actual parsed results

    Be careful if the input text contains C{<TAB>} characters, you may want to call
    C{L{ParserElement.parseWithTabs}}

    Example::
        wd = Word(alphas)
        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
            print(match)
    prints::
        [[0, 'ljsdf', 5]]
        [[8, 'lksdjjf', 15]]
        [[18, 'lkkjj', 23]]
    """
    # zero-width expression whose parse action records the current location;
    # the trailing copy leaves whitespace so locn_end is the true match end
    locator = Empty().setParseAction(lambda s,l,t: l)
    return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))

+

+

# convenience constants for positional expressions
empty       = Empty().setName("empty")
lineStart   = LineStart().setName("lineStart")
lineEnd     = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd   = StringEnd().setName("stringEnd")

# building blocks for srange(): escaped punctuation (e.g. \]), hex and octal
# character escapes, any single character, and a character range like "a-z",
# combined into the bracketed [...] expression that srange() parses
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"

+

def srange(s):
    r"""
    Helper to easily define string ranges for use in Word construction.  Borrows
    syntax from regexp '[]' string range definitions::
        srange("[0-9]")   -> "0123456789"
        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be:
     - a single character
     - an escaped character with a leading backslash (such as C{\-} or C{\]})
     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) 
         (C{\0x##} is also supported for backwards compatibility) 
     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
    """
    # expand a two-element range group ('a','z') into the full run of
    # characters; single characters pass through unchanged
    _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
    try:
        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
    except Exception:
        # NOTE(review): malformed input deliberately yields "" rather than
        # raising - callers rely on this best-effort behavior
        return ""

+

def matchOnlyAtCol(n):
    """
    Helper returning a parse action that fails unless the match begins at
    column C{n} of the input text.
    """
    def checkColumn(strg, locn, toks):
        # col() is 1-based, computed from the match location
        if col(locn, strg) != n:
            raise ParseException(strg, locn, "matched token not at column %d" % n)
    return checkColumn

+

def replaceWith(replStr):
    """
    Helper returning a parse action that replaces the matched tokens with
    the single literal value C{replStr}.  Especially useful when used with
    C{L{transformString<ParserElement.transformString>}()}.

    Example::
        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
        OneOrMore(na | num).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
    """
    def _replacer(s, l, t):
        return [replStr]
    return _replacer

+

def removeQuotes(s,l,t):
    """
    Parse action that strips the first and last characters (the enclosing
    quotation marks) from a parsed quoted string.

    Example::
        # by default, quotation marks are included in parsed results
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]

        # use removeQuotes to strip quotation marks from parsed results
        quotedString.setParseAction(removeQuotes)
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
    """
    # drop the leading and trailing quote characters
    quoted = t[0]
    return quoted[1:-1]

+

def tokenMap(func, *args):
    """
    Helper building a parse action that applies C{func} (plus any extra
    C{args}) to every element of the token list.  For example,
    C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}
    converts the parsed text to an integer using base 16.

    Example::
        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
        OneOrMore(upperword).parseString("my kingdom for a horse")
        # -> ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
    """
    def mapped(s, l, t):
        return [func(item, *args) for item in t]

    # name the parse action after the mapped function, for debug output
    try:
        name = getattr(func, '__name__',
                       getattr(func, '__class__').__name__)
    except Exception:
        name = str(func)
    mapped.__name__ = name

    return mapped

+

# legacy module-level case-conversion parse actions, kept for backward
# compatibility with older pyparsing client code
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""

downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""

+    

def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    if isinstance(tagStr,basestring):
        resname = tagStr
        # HTML tags match caselessly; XML tags are case-sensitive
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        # already a pyparsing expression - use its name for results naming
        resname = tagStr.name

    tagAttrName = Word(alphas,alphanums+"_-:")
    if (xml):
        # XML: attribute values must be double-quoted
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    else:
        # HTML: attribute values may be quoted or unquoted (any text up to '>'),
        # attribute names are lowercased, and the '=value' part is optional
        printablesLessRAbrack = "".join(c for c in printables if c not in ">")
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
                Optional( Suppress("=") + tagAttrValue ) ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    closeTag = Combine(_L("</") + tagStr + ">")

    # results names like "startDiv"/"endDiv", display names like "<div>"/"</div>"
    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
    openTag.tag = resname
    closeTag.tag = resname
    return openTag, closeTag

+

def makeHTMLTags(tagStr):
    """
    Helper constructing an (openTag, closeTag) pair of expressions for the
    given HTML tag name.  Tags match in either upper or lower case, and
    attributes may be namespaced and quoted or unquoted.

    Example::
        text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
        a, a_end = makeHTMLTags("A")
        link_expr = a + SkipTo(a_end)("link_text") + a_end

        for link in link_expr.searchString(text):
            # attributes in the <A> tag (like "href") are accessible as named results
            print(link.link_text, '->', link.href)
    prints::
        pyparsing -> http://pyparsing.wikispaces.com
    """
    return _makeTags(tagStr, False)

+

def makeXMLTags(tagStr):
    """
    Helper constructing an (openTag, closeTag) pair of expressions for the
    given XML tag name.  Unlike L{makeHTMLTags}, tags match only in the
    given upper/lower case.

    Example: similar to L{makeHTMLTags}
    """
    return _makeTags(tagStr, True)

+

def withAttribute(*args,**attrDict):
    """
    Helper producing a validating parse action for start tags created with
    C{L{makeXMLTags}} or C{L{makeHTMLTags}}, to qualify the tag by required
    attribute values and avoid false matches on common tags such as
    C{<TD>} or C{<DIV>}.

    The required attributes may be given as:
     - keyword arguments, as in C{(align="right")}, or
     - an explicit dict with the C{**} operator, when an attribute name is a
       Python reserved word, as in C{**{"class":"Customer", "align":"right"}}
     - a list of name-value tuples, as in
       C{( ("ns1:class", "Customer"), ("ns2:align","right") )}
    Attribute names with a namespace prefix require the tuple form; names
    are matched insensitive to upper/lower case.

    To require only that an attribute be present, regardless of its value,
    pass C{withAttribute.ANY_VALUE} as the value.  If testing just for
    C{class}, see the simpler C{L{withClass}}.
    """
    pairs = list(args) if args else list(attrDict.items())
    required = [(name, value) for name, value in pairs]
    def pa(s,l,tokens):
        for name, value in required:
            if name not in tokens:
                raise ParseException(s,l,"no matching attribute " + name)
            if value != withAttribute.ANY_VALUE and tokens[name] != value:
                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
                                            (name, tokens[name], value))
    return pa
# sentinel meaning "attribute must be present, with any value"
withAttribute.ANY_VALUE = object()

+

def withClass(classname, namespace=''):
    """
    Convenience form of C{L{withAttribute}} for matching on an element's
    C{class} attribute - needed because C{class} is a reserved word in
    Python and cannot be passed as a keyword argument.

    Example::
        div, div_end = makeHTMLTags("div")
        div_grid = div().setParseAction(withClass("grid"))
        grid_expr = div_grid + SkipTo(div | div_end)("body")
    """
    if namespace:
        classattr = "%s:class" % namespace
    else:
        classattr = "class"
    return withAttribute(**{classattr : classname})

+

# Associativity constants for use with infixNotation: pass opAssoc.LEFT or
# opAssoc.RIGHT as the third member of each operator-definition tuple.
# Distinct object() sentinels guarantee identity-safe comparison.
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()

+

def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
    """
    Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy.  Operators may be unary or
    binary, left- or right-associative.  Parse actions can also be attached
    to operator expressions. The generated parser will also recognize the use 
    of parentheses to override operator precedences (see example below).
    
    Note: if you define a deep operator list, you may see performance issues
    when using infixNotation. See L{ParserElement.enablePackrat} for a
    mechanism to potentially improve your parser performance.

    Parameters:
     - baseExpr - expression representing the most basic element for the nested
     - opList - list of tuples, one for each operator precedence level in the
      expression grammar; each tuple is of the form
      (opExpr, numTerms, rightLeftAssoc, parseAction), where:
       - opExpr is the pyparsing expression for the operator;
          may also be a string, which will be converted to a Literal;
          if numTerms is 3, opExpr is a tuple of two expressions, for the
          two operators separating the 3 terms
       - numTerms is the number of terms for this operator (must
          be 1, 2, or 3)
       - rightLeftAssoc is the indicator whether the operator is
          right or left associative, using the pyparsing-defined
          constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
       - parseAction is the parse action to be associated with
          expressions matching this operator expression (the
          parse action tuple member may be omitted)
     - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
     - rpar - expression for matching right-parentheses (default=C{Suppress(')')})

    Example::
        # simple example of four-function arithmetic with ints and variable names
        integer = pyparsing_common.signed_integer
        varname = pyparsing_common.identifier 
        
        arith_expr = infixNotation(integer | varname,
            [
            ('-', 1, opAssoc.RIGHT),
            (oneOf('* /'), 2, opAssoc.LEFT),
            (oneOf('+ -'), 2, opAssoc.LEFT),
            ])
        
        arith_expr.runTests('''
            5+3*6
            (5+3)*6
            -2--11
            ''', fullDump=False)
    prints::
        5+3*6
        [[5, '+', [3, '*', 6]]]

        (5+3)*6
        [[[5, '+', 3], '*', 6]]

        -2--11
        [[['-', 2], '-', ['-', 11]]]
    """
    ret = Forward()
    # An operand at the tightest-binding level is either the base expression
    # or a parenthesized full expression - this is what lets parentheses
    # override operator precedence.
    lastExpr = baseExpr | ( lpar + ret + rpar )
    # opList is ordered from highest to lowest precedence; each iteration
    # wraps the previous level's expression (lastExpr) in the next level.
    for i,operDef in enumerate(opList):
        # Pad the tuple with None so the parse-action member may be omitted.
        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
        termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
        if arity == 3:
            # Ternary operators supply their two separator expressions as a pair.
            if opExpr is None or len(opExpr) != 2:
                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        thisExpr = Forward().setName(termName)
        if rightLeftAssoc == opAssoc.LEFT:
            # Left-associative forms consume repeated operators iteratively
            # (OneOrMore) rather than via recursion, to avoid left recursion.
            if arity == 1:
                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
                else:
                    # opExpr of None means implicit/juxtaposition "operator"
                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            # Right-associative forms recurse into thisExpr on the right side.
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            matchExpr.setParseAction( pa )
        # Each level matches either an operator expression at this level, or
        # falls through to the next-tighter-binding level.
        thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
        lastExpr = thisExpr
    ret <<= lastExpr
    return ret

+

operatorPrecedence = infixNotation
"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""

# Predefined quoted-string expressions.  Each regex matches the opening quote
# and string body, allowing doubled quotes ("" / '') and backslash escapes
# (including \xNN hex escapes); Combine re-attaches the closing quote with no
# intervening whitespace.  In quotedString, '+' binds tighter than '|', so the
# alternation is (double-quoted form) | (single-quoted form).
dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
                       Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")

+

def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
    """
    Helper method for defining nested lists enclosed in opening and closing
    delimiters ("(" and ")" are the default).

    Parameters:
     - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
     - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
     - content - expression for items within the nested lists (default=C{None})
     - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})

    If an expression is not provided for the content argument, the nested
    expression will capture all whitespace-delimited content between delimiters
    as a list of separate values.

    Use the C{ignoreExpr} argument to define expressions that may contain
    opening or closing characters that should not be treated as opening
    or closing characters for nesting, such as quotedString or a comment
    expression.  Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
    The default is L{quotedString}, but if no expressions are to be ignored,
    then pass C{None} for this argument.

    Example::
        data_type = oneOf("void int short long char float double")
        decl_data_type = Combine(data_type + Optional(Word('*')))
        ident = Word(alphas+'_', alphanums+'_')
        number = pyparsing_common.number
        arg = Group(decl_data_type + ident)
        LPAR,RPAR = map(Suppress, "()")

        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))

        c_function = (decl_data_type("type") 
                      + ident("name")
                      + LPAR + Optional(delimitedList(arg), [])("args") + RPAR 
                      + code_body("body"))
        c_function.ignore(cStyleComment)
        
        source_code = '''
            int is_odd(int x) { 
                return (x%2); 
            }
                
            int dec_to_hex(char hchar) { 
                if (hchar >= '0' && hchar <= '9') { 
                    return (ord(hchar)-ord('0')); 
                } else { 
                    return (10+ord(hchar)-ord('A'));
                } 
            }
        '''
        for func in c_function.searchString(source_code):
            print("%(name)s (%(type)s) args: %(args)s" % func)

    prints::
        is_odd (int) args: [['int', 'x']]
        dec_to_hex (int) args: [['char', 'hchar']]
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # No content expression supplied - synthesize a default that collects
        # whitespace-delimited runs of characters, stopping at the delimiters
        # (and never consuming into an ignoreExpr match such as a quoted
        # string that happens to contain a delimiter character).
        if isinstance(opener,basestring) and isinstance(closer,basestring):
            if len(opener) == 1 and len(closer)==1:
                # Single-character delimiters can be excluded via CharsNotIn.
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
                                ).setParseAction(lambda t:t[0].strip()))
            else:
                # Multi-character delimiters require explicit negative
                # lookahead on the full literal before each character.
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr + 
                                    ~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    # Recursive grammar: a nested expression may contain further nested
    # expressions; each nesting level is wrapped in its own Group.
    ret = Forward()
    if ignoreExpr is not None:
        ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
    else:
        ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content )  + Suppress(closer) )
    ret.setName('nested %s%s expression' % (opener,closer))
    return ret

+

def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """
    Helper method for defining space-delimited indentation blocks, such as
    those used to define block statements in Python source code.

    Parameters:
     - blockStatementExpr - expression defining syntax of statement that
            is repeated within the indented block
     - indentStack - list created by caller to manage indentation stack
            (multiple statementWithIndentedBlock expressions within a single grammar
            should share a common indentStack)
     - indent - boolean indicating whether block must be indented beyond
            the current level; set to False for block of left-most statements
            (default=C{True})

    A valid block must contain at least one C{blockStatement}.

    Example::
        data = '''
        def A(z):
          A1
          B = 100
          G = A2
          A2
          A3
        B
        def BB(a,b,c):
          BB1
          def BBA():
            bba1
            bba2
            bba3
        C
        D
        def spam(x,y):
             def eggs(z):
                 pass
        '''


        indentStack = [1]
        stmt = Forward()

        identifier = Word(alphas, alphanums)
        funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
        func_body = indentedBlock(stmt, indentStack)
        funcDef = Group( funcDecl + func_body )

        rvalue = Forward()
        funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
        rvalue << (funcCall | identifier | Word(nums))
        assignment = Group(identifier + "=" + rvalue)
        stmt << ( funcDef | assignment | identifier )

        module_body = OneOrMore(stmt)

        parseTree = module_body.parseString(data)
        parseTree.pprint()
    prints::
        [['def',
          'A',
          ['(', 'z', ')'],
          ':',
          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
         'B',
         ['def',
          'BB',
          ['(', 'a', 'b', 'c', ')'],
          ':',
          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
         'C',
         'D',
         ['def',
          'spam',
          ['(', 'x', 'y', ')'],
          ':',
          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] 
    """
    # The three parse actions below compare the column of the current parse
    # location against the caller-shared indentStack; they raise to reject a
    # match and (for sub-indent/unindent) push/pop the stack as a side effect.

    def checkPeerIndent(s,l,t):
        # Accept only statements at exactly the current indentation level.
        if l >= len(s): return
        curCol = col(l,s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseFatalException(s,l,"illegal nesting")
            raise ParseException(s,l,"not a peer entry")

    def checkSubIndent(s,l,t):
        # Require a deeper indent than the current level, and push it.
        curCol = col(l,s)
        if curCol > indentStack[-1]:
            indentStack.append( curCol )
        else:
            raise ParseException(s,l,"not a subentry")

    def checkUnindent(s,l,t):
        # Require a return to a shallower level already on the stack; pop it.
        if l >= len(s): return
        curCol = col(l,s)
        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
            raise ParseException(s,l,"not an unindent")
        indentStack.pop()

    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
    PEER   = Empty().setParseAction(checkPeerIndent).setName('')
    UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
    if indent:
        smExpr = Group( Optional(NL) +
            #~ FollowedBy(blockStatementExpr) +
            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
    else:
        smExpr = Group( Optional(NL) +
            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
    # Backslash-continued lines should not terminate a statement.
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr.setName('indented block')

+

# Latin-1 supplements to the basic ASCII character sets (srange expands the
# \0xNN range notation into the corresponding characters).
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")

# Expressions matching any HTML open/close tag, plus the most common
# character entities and a parse action to decode them.
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
    """Helper parser action to replace common HTML entities with their special characters"""
    # t.entity is the named group captured by commonHTMLEntity above.
    return _htmlEntityMap.get(t.entity)

+

# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form C{/* ... */}"

htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form C{<!-- ... -->}"

restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
# the (?:\\\n|...) alternative lets a // comment continue across a
# backslash-continued line
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form C{// ... (to end of line)}"

cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"

javaStyleComment = cppStyleComment
"Same as C{L{cppStyleComment}}"

pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form C{# ... (to end of line)}"

# An item in a comma-separated list: one or more printable words, allowing
# embedded single spaces/tabs as long as they are not followed by a comma or
# end of line.
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
                                  Optional( Word(" \t") +
                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
   This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""

+

+# some other useful expressions - using lower-case class name since we are really using this as a namespace

# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
    """
    Here are some common low-level expressions that may be useful in jump-starting parser development:
     - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
     - common L{programming identifiers<identifier>}
     - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
     - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
     - L{UUID<uuid>}
     - L{comma-separated list<comma_separated_list>}
    Parse actions:
     - C{L{convertToInteger}}
     - C{L{convertToFloat}}
     - C{L{convertToDate}}
     - C{L{convertToDatetime}}
     - C{L{stripHTMLTags}}
     - C{L{upcaseTokens}}
     - C{L{downcaseTokens}}

    Example::
        pyparsing_common.number.runTests('''
            # any int or real number, returned as the appropriate type
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')

        pyparsing_common.fnumber.runTests('''
            # any int or real number, returned as float
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')

        pyparsing_common.hex_integer.runTests('''
            # hex numbers
            100
            FF
            ''')

        pyparsing_common.fraction.runTests('''
            # fractions
            1/2
            -3/4
            ''')

        pyparsing_common.mixed_integer.runTests('''
            # mixed fractions
            1
            1/2
            -3/4
            1-3/4
            ''')

        import uuid
        pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
        pyparsing_common.uuid.runTests('''
            # uuid
            12345678-1234-5678-1234-567812345678
            ''')
    prints::
        # any int or real number, returned as the appropriate type
        100
        [100]

        -100
        [-100]

        +100
        [100]

        3.14159
        [3.14159]

        6.02e23
        [6.02e+23]

        1e-12
        [1e-12]

        # any int or real number, returned as float
        100
        [100.0]

        -100
        [-100.0]

        +100
        [100.0]

        3.14159
        [3.14159]

        6.02e23
        [6.02e+23]

        1e-12
        [1e-12]

        # hex numbers
        100
        [256]

        FF
        [255]

        # fractions
        1/2
        [0.5]

        -3/4
        [-0.75]

        # mixed fractions
        1
        [1]

        1/2
        [0.5]

        -3/4
        [-0.75]

        1-3/4
        [1.75]

        # uuid
        12345678-1234-5678-1234-567812345678
        [UUID('12345678-1234-5678-1234-567812345678')]
    """

    convertToInteger = tokenMap(int)
    """
    Parse action for converting parsed integers to Python int
    """

    convertToFloat = tokenMap(float)
    """
    Parse action for converting parsed numbers to Python float
    """

    integer = Word(nums).setName("integer").setParseAction(convertToInteger)
    """expression that parses an unsigned integer, returns an int"""

    hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
    """expression that parses a hexadecimal integer, returns an int"""

    signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
    """expression that parses an integer with optional leading sign, returns an int"""

    # signed_integer() creates copies so the int-conversion parse action on
    # the shared signed_integer expression is not replaced by convertToFloat.
    fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
    """fractional expression of an integer divided by an integer, returns a float"""
    fraction.addParseAction(lambda t: t[0]/t[-1])

    mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
    """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
    mixed_integer.addParseAction(sum)

    real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
    """expression that parses a floating point number and returns a float"""

    sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
    """expression that parses a floating point number with optional scientific notation and returns a float"""

    # streamlining this expression makes the docs nicer-looking
    number = (sci_real | real | signed_integer).streamline()
    """any numeric expression, returns the corresponding Python type"""

    fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
    """any int or real number, returned as float"""
    
    identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
    """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
    
    ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
    "IPv4 address (C{0.0.0.0 - 255.255.255.255})"

    _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
    _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
    _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
    # the short ("::") form is only valid when fewer than 8 groups are present
    _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
    _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
    ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
    "IPv6 address (long, short, or mixed form)"
    
    mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
    "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"

    @staticmethod
    def convertToDate(fmt="%Y-%m-%d"):
        """
        Helper to create a parse action for converting parsed date string to Python datetime.date

        Params -
         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})

        Example::
            date_expr = pyparsing_common.iso8601_date.copy()
            date_expr.setParseAction(pyparsing_common.convertToDate())
            print(date_expr.parseString("1999-12-31"))
        prints::
            [datetime.date(1999, 12, 31)]
        """
        def cvt_fn(s,l,t):
            try:
                return datetime.strptime(t[0], fmt).date()
            except ValueError as ve:
                # re-raise as a parse failure so callers get location info
                raise ParseException(s, l, str(ve))
        return cvt_fn

    @staticmethod
    def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
        """
        Helper to create a parse action for converting parsed datetime string to Python datetime.datetime

        Params -
         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})

        Example::
            dt_expr = pyparsing_common.iso8601_datetime.copy()
            dt_expr.setParseAction(pyparsing_common.convertToDatetime())
            print(dt_expr.parseString("1999-12-31T23:59:59.999"))
        prints::
            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
        """
        def cvt_fn(s,l,t):
            try:
                return datetime.strptime(t[0], fmt)
            except ValueError as ve:
                # re-raise as a parse failure so callers get location info
                raise ParseException(s, l, str(ve))
        return cvt_fn

    iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
    "ISO8601 date (C{yyyy-mm-dd})"

    iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
    "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"

    uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
    "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"

    _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
    @staticmethod
    def stripHTMLTags(s, l, tokens):
        """
        Parse action to remove HTML tags from web page HTML source

        Example::
            # strip HTML links from normal text 
            text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
            td,td_end = makeHTMLTags("TD")
            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
            
            print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
        """
        return pyparsing_common._html_stripper.transformString(tokens[0])

    _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') 
                                        + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
    comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""

    upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
    """Parse action to convert tokens to upper case."""

    downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
    """Parse action to convert tokens to lower case."""

+

+

# Self-test / demo: builds a toy SQL grammar and exercises the
# pyparsing_common expressions via runTests when run as a script.
if __name__ == "__main__":

    selectToken    = CaselessLiteral("select")
    fromToken      = CaselessLiteral("from")

    ident          = Word(alphas, alphanums + "_$")

    # dotted names like SYS.XYZZY, upcased for canonical output
    columnName     = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
    columnNameList = Group(delimitedList(columnName)).setName("columns")
    columnSpec     = ('*' | columnNameList)

    tableName      = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
    tableNameList  = Group(delimitedList(tableName)).setName("tables")
    
    simpleSQL      = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")

    # demo runTests method, including embedded comments in test string
    simpleSQL.runTests("""
        # '*' as column list and dotted table name
        select * from SYS.XYZZY

        # caseless match on "SELECT", and casts back to "select"
        SELECT * from XYZZY, ABC

        # list of column names, and mixed case SELECT keyword
        Select AA,BB,CC from Sys.dual

        # multiple tables
        Select A, B, C from Sys.dual, Table2

        # invalid SELECT keyword - should fail
        Xelect A, B, C from Sys.dual

        # incomplete command - should fail
        Select

        # invalid column name - should fail
        Select ^^^ frox Sys.dual

        """)

    pyparsing_common.number.runTests("""
        100
        -100
        +100
        3.14159
        6.02e23
        1e-12
        """)

    # any int or real number, returned as float
    pyparsing_common.fnumber.runTests("""
        100
        -100
        +100
        3.14159
        6.02e23
        1e-12
        """)

    pyparsing_common.hex_integer.runTests("""
        100
        FF
        """)

    import uuid
    pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
    pyparsing_common.uuid.runTests("""
        12345678-1234-5678-1234-567812345678
        """)

diff --git a/pkg_resources/_vendor/six.py b/pkg_resources/_vendor/six.py
new file mode 100644
index 0000000..190c023
--- /dev/null
+++ b/pkg_resources/_vendor/six.py
@@ -0,0 +1,868 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2015 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.10.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+    string_types = str,
+    integer_types = int,
+    class_types = type,
+    text_type = str
+    binary_type = bytes
+
+    MAXSIZE = sys.maxsize
+else:
+    string_types = basestring,
+    integer_types = (int, long)
+    class_types = (type, types.ClassType)
+    text_type = unicode
+    binary_type = str
+
+    if sys.platform.startswith("java"):
+        # Jython always uses 32 bits.
+        MAXSIZE = int((1 << 31) - 1)
+    else:
+        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+        class X(object):
+
+            def __len__(self):
+                return 1 << 31
+        try:
+            len(X())
+        except OverflowError:
+            # 32-bit
+            MAXSIZE = int((1 << 31) - 1)
+        else:
+            # 64-bit
+            MAXSIZE = int((1 << 63) - 1)
+        del X
+
+
+def _add_doc(func, doc):
+    """Add documentation to a function."""
+    func.__doc__ = doc
+
+
+def _import_module(name):
+    """Import module, returning the module after the last dot."""
+    __import__(name)
+    return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+    def __init__(self, name):
+        self.name = name
+
+    def __get__(self, obj, tp):
+        result = self._resolve()
+        setattr(obj, self.name, result)  # Invokes __set__.
+        try:
+            # This is a bit ugly, but it avoids running this again by
+            # removing this descriptor.
+            delattr(obj.__class__, self.name)
+        except AttributeError:
+            pass
+        return result
+
+
+class MovedModule(_LazyDescr):
+
+    def __init__(self, name, old, new=None):
+        super(MovedModule, self).__init__(name)
+        if PY3:
+            if new is None:
+                new = name
+            self.mod = new
+        else:
+            self.mod = old
+
+    def _resolve(self):
+        return _import_module(self.mod)
+
+    def __getattr__(self, attr):
+        _module = self._resolve()
+        value = getattr(_module, attr)
+        setattr(self, attr, value)
+        return value
+
+
+class _LazyModule(types.ModuleType):
+
+    def __init__(self, name):
+        super(_LazyModule, self).__init__(name)
+        self.__doc__ = self.__class__.__doc__
+
+    def __dir__(self):
+        attrs = ["__doc__", "__name__"]
+        attrs += [attr.name for attr in self._moved_attributes]
+        return attrs
+
+    # Subclasses should override this
+    _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+        super(MovedAttribute, self).__init__(name)
+        if PY3:
+            if new_mod is None:
+                new_mod = name
+            self.mod = new_mod
+            if new_attr is None:
+                if old_attr is None:
+                    new_attr = name
+                else:
+                    new_attr = old_attr
+            self.attr = new_attr
+        else:
+            self.mod = old_mod
+            if old_attr is None:
+                old_attr = name
+            self.attr = old_attr
+
+    def _resolve(self):
+        module = _import_module(self.mod)
+        return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+    """
+    A meta path importer to import six.moves and its submodules.
+
+    This class implements a PEP302 finder and loader. It should be compatible
+    with Python 2.5 and all existing versions of Python3
+    """
+
+    def __init__(self, six_module_name):
+        self.name = six_module_name
+        self.known_modules = {}
+
+    def _add_module(self, mod, *fullnames):
+        for fullname in fullnames:
+            self.known_modules[self.name + "." + fullname] = mod
+
+    def _get_module(self, fullname):
+        return self.known_modules[self.name + "." + fullname]
+
+    def find_module(self, fullname, path=None):
+        if fullname in self.known_modules:
+            return self
+        return None
+
+    def __get_module(self, fullname):
+        try:
+            return self.known_modules[fullname]
+        except KeyError:
+            raise ImportError("This loader does not know module " + fullname)
+
+    def load_module(self, fullname):
+        try:
+            # in case of a reload
+            return sys.modules[fullname]
+        except KeyError:
+            pass
+        mod = self.__get_module(fullname)
+        if isinstance(mod, MovedModule):
+            mod = mod._resolve()
+        else:
+            mod.__loader__ = self
+        sys.modules[fullname] = mod
+        return mod
+
+    def is_package(self, fullname):
+        """
+        Return true, if the named module is a package.
+
+        We need this method to get correct spec objects with
+        Python 3.4 (see PEP451)
+        """
+        return hasattr(self.__get_module(fullname), "__path__")
+
+    def get_code(self, fullname):
+        """Return None
+
+        Required, if is_package is implemented"""
+        self.__get_module(fullname)  # eventually raises ImportError
+        return None
+    get_source = get_code  # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+    """Lazy loading of moved objects"""
+    __path__ = []  # mark as package
+
+
+_moved_attributes = [
+    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+    MovedAttribute("intern", "__builtin__", "sys"),
+    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+    MovedAttribute("reduce", "__builtin__", "functools"),
+    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+    MovedAttribute("StringIO", "StringIO", "io"),
+    MovedAttribute("UserDict", "UserDict", "collections"),
+    MovedAttribute("UserList", "UserList", "collections"),
+    MovedAttribute("UserString", "UserString", "collections"),
+    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+    MovedModule("builtins", "__builtin__"),
+    MovedModule("configparser", "ConfigParser"),
+    MovedModule("copyreg", "copy_reg"),
+    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+    MovedModule("http_cookies", "Cookie", "http.cookies"),
+    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+    MovedModule("html_parser", "HTMLParser", "html.parser"),
+    MovedModule("http_client", "httplib", "http.client"),
+    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+    MovedModule("cPickle", "cPickle", "pickle"),
+    MovedModule("queue", "Queue"),
+    MovedModule("reprlib", "repr"),
+    MovedModule("socketserver", "SocketServer"),
+    MovedModule("_thread", "thread", "_thread"),
+    MovedModule("tkinter", "Tkinter"),
+    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+    MovedModule("tkinter_colorchooser", "tkColorChooser",
+                "tkinter.colorchooser"),
+    MovedModule("tkinter_commondialog", "tkCommonDialog",
+                "tkinter.commondialog"),
+    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+                "tkinter.simpledialog"),
+    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+    _moved_attributes += [
+        MovedModule("winreg", "_winreg"),
+    ]
+
+for attr in _moved_attributes:
+    setattr(_MovedItems, attr.name, attr)
+    if isinstance(attr, MovedModule):
+        _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("quote", "urllib", "urllib.parse"),
+    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("unquote", "urllib", "urllib.parse"),
+    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("urlencode", "urllib", "urllib.parse"),
+    MovedAttribute("splitquery", "urllib", "urllib.parse"),
+    MovedAttribute("splittag", "urllib", "urllib.parse"),
+    MovedAttribute("splituser", "urllib", "urllib.parse"),
+    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+    setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+                      "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+    MovedAttribute("URLError", "urllib2", "urllib.error"),
+    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+    setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+                      "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+    MovedAttribute("urlopen", "urllib2", "urllib.request"),
+    MovedAttribute("install_opener", "urllib2", "urllib.request"),
+    MovedAttribute("build_opener", "urllib2", "urllib.request"),
+    MovedAttribute("pathname2url", "urllib", "urllib.request"),
+    MovedAttribute("url2pathname", "urllib", "urllib.request"),
+    MovedAttribute("getproxies", "urllib", "urllib.request"),
+    MovedAttribute("Request", "urllib2", "urllib.request"),
+    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+    MovedAttribute("URLopener", "urllib", "urllib.request"),
+    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+    setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+                      "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+    MovedAttribute("addbase", "urllib", "urllib.response"),
+    MovedAttribute("addclosehook", "urllib", "urllib.response"),
+    MovedAttribute("addinfo", "urllib", "urllib.response"),
+    MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+    setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+                      "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+                      "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+    __path__ = []  # mark as package
+    parse = _importer._get_module("moves.urllib_parse")
+    error = _importer._get_module("moves.urllib_error")
+    request = _importer._get_module("moves.urllib_request")
+    response = _importer._get_module("moves.urllib_response")
+    robotparser = _importer._get_module("moves.urllib_robotparser")
+
+    def __dir__(self):
+        return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+                      "moves.urllib")
+
+
+def add_move(move):
+    """Add an item to six.moves."""
+    setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+    """Remove item from six.moves."""
+    try:
+        delattr(_MovedItems, name)
+    except AttributeError:
+        try:
+            del moves.__dict__[name]
+        except KeyError:
+            raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+    _meth_func = "__func__"
+    _meth_self = "__self__"
+
+    _func_closure = "__closure__"
+    _func_code = "__code__"
+    _func_defaults = "__defaults__"
+    _func_globals = "__globals__"
+else:
+    _meth_func = "im_func"
+    _meth_self = "im_self"
+
+    _func_closure = "func_closure"
+    _func_code = "func_code"
+    _func_defaults = "func_defaults"
+    _func_globals = "func_globals"
+
+
+try:
+    advance_iterator = next
+except NameError:
+    def advance_iterator(it):
+        return it.next()
+next = advance_iterator
+
+
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+    def get_unbound_function(unbound):
+        return unbound
+
+    create_bound_method = types.MethodType
+
+    def create_unbound_method(func, cls):
+        return func
+
+    Iterator = object
+else:
+    def get_unbound_function(unbound):
+        return unbound.im_func
+
+    def create_bound_method(func, obj):
+        return types.MethodType(func, obj, obj.__class__)
+
+    def create_unbound_method(func, cls):
+        return types.MethodType(func, None, cls)
+
+    class Iterator(object):
+
+        def next(self):
+            return type(self).__next__(self)
+
+    callable = callable
+_add_doc(get_unbound_function,
+         """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+    def iterkeys(d, **kw):
+        return iter(d.keys(**kw))
+
+    def itervalues(d, **kw):
+        return iter(d.values(**kw))
+
+    def iteritems(d, **kw):
+        return iter(d.items(**kw))
+
+    def iterlists(d, **kw):
+        return iter(d.lists(**kw))
+
+    viewkeys = operator.methodcaller("keys")
+
+    viewvalues = operator.methodcaller("values")
+
+    viewitems = operator.methodcaller("items")
+else:
+    def iterkeys(d, **kw):
+        return d.iterkeys(**kw)
+
+    def itervalues(d, **kw):
+        return d.itervalues(**kw)
+
+    def iteritems(d, **kw):
+        return d.iteritems(**kw)
+
+    def iterlists(d, **kw):
+        return d.iterlists(**kw)
+
+    viewkeys = operator.methodcaller("viewkeys")
+
+    viewvalues = operator.methodcaller("viewvalues")
+
+    viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+         "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+         "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+    def b(s):
+        return s.encode("latin-1")
+
+    def u(s):
+        return s
+    unichr = chr
+    import struct
+    int2byte = struct.Struct(">B").pack
+    del struct
+    byte2int = operator.itemgetter(0)
+    indexbytes = operator.getitem
+    iterbytes = iter
+    import io
+    StringIO = io.StringIO
+    BytesIO = io.BytesIO
+    _assertCountEqual = "assertCountEqual"
+    if sys.version_info[1] <= 1:
+        _assertRaisesRegex = "assertRaisesRegexp"
+        _assertRegex = "assertRegexpMatches"
+    else:
+        _assertRaisesRegex = "assertRaisesRegex"
+        _assertRegex = "assertRegex"
+else:
+    def b(s):
+        return s
+    # Workaround for standalone backslash
+
+    def u(s):
+        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+    unichr = unichr
+    int2byte = chr
+
+    def byte2int(bs):
+        return ord(bs[0])
+
+    def indexbytes(buf, i):
+        return ord(buf[i])
+    iterbytes = functools.partial(itertools.imap, ord)
+    import StringIO
+    StringIO = BytesIO = StringIO.StringIO
+    _assertCountEqual = "assertItemsEqual"
+    _assertRaisesRegex = "assertRaisesRegexp"
+    _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+    return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+    return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+if PY3:
+    exec_ = getattr(moves.builtins, "exec")
+
+    def reraise(tp, value, tb=None):
+        if value is None:
+            value = tp()
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+
+else:
+    def exec_(_code_, _globs_=None, _locs_=None):
+        """Execute code in a namespace."""
+        if _globs_ is None:
+            frame = sys._getframe(1)
+            _globs_ = frame.f_globals
+            if _locs_ is None:
+                _locs_ = frame.f_locals
+            del frame
+        elif _locs_ is None:
+            _locs_ = _globs_
+        exec("""exec _code_ in _globs_, _locs_""")
+
+    exec_("""def reraise(tp, value, tb=None):
+    raise tp, value, tb
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+    exec_("""def raise_from(value, from_value):
+    if from_value is None:
+        raise value
+    raise value from from_value
+""")
+elif sys.version_info[:2] > (3, 2):
+    exec_("""def raise_from(value, from_value):
+    raise value from from_value
+""")
+else:
+    def raise_from(value, from_value):
+        raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+    def print_(*args, **kwargs):
+        """The new-style print function for Python 2.4 and 2.5."""
+        fp = kwargs.pop("file", sys.stdout)
+        if fp is None:
+            return
+
+        def write(data):
+            if not isinstance(data, basestring):
+                data = str(data)
+            # If the file has an encoding, encode unicode with it.
+            if (isinstance(fp, file) and
+                    isinstance(data, unicode) and
+                    fp.encoding is not None):
+                errors = getattr(fp, "errors", None)
+                if errors is None:
+                    errors = "strict"
+                data = data.encode(fp.encoding, errors)
+            fp.write(data)
+        want_unicode = False
+        sep = kwargs.pop("sep", None)
+        if sep is not None:
+            if isinstance(sep, unicode):
+                want_unicode = True
+            elif not isinstance(sep, str):
+                raise TypeError("sep must be None or a string")
+        end = kwargs.pop("end", None)
+        if end is not None:
+            if isinstance(end, unicode):
+                want_unicode = True
+            elif not isinstance(end, str):
+                raise TypeError("end must be None or a string")
+        if kwargs:
+            raise TypeError("invalid keyword arguments to print()")
+        if not want_unicode:
+            for arg in args:
+                if isinstance(arg, unicode):
+                    want_unicode = True
+                    break
+        if want_unicode:
+            newline = unicode("\n")
+            space = unicode(" ")
+        else:
+            newline = "\n"
+            space = " "
+        if sep is None:
+            sep = space
+        if end is None:
+            end = newline
+        for i, arg in enumerate(args):
+            if i:
+                write(sep)
+            write(arg)
+        write(end)
+if sys.version_info[:2] < (3, 3):
+    _print = print_
+
+    def print_(*args, **kwargs):
+        fp = kwargs.get("file", sys.stdout)
+        flush = kwargs.pop("flush", False)
+        _print(*args, **kwargs)
+        if flush and fp is not None:
+            fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+              updated=functools.WRAPPER_UPDATES):
+        def wrapper(f):
+            f = functools.wraps(wrapped, assigned, updated)(f)
+            f.__wrapped__ = wrapped
+            return f
+        return wrapper
+else:
+    wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
+    # This requires a bit of explanation: the basic idea is to make a dummy
+    # metaclass for one level of class instantiation that replaces itself with
+    # the actual metaclass.
+    class metaclass(meta):
+
+        def __new__(cls, name, this_bases, d):
+            return meta(name, bases, d)
+    return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+    """Class decorator for creating a class with a metaclass."""
+    def wrapper(cls):
+        orig_vars = cls.__dict__.copy()
+        slots = orig_vars.get('__slots__')
+        if slots is not None:
+            if isinstance(slots, str):
+                slots = [slots]
+            for slots_var in slots:
+                orig_vars.pop(slots_var)
+        orig_vars.pop('__dict__', None)
+        orig_vars.pop('__weakref__', None)
+        return metaclass(cls.__name__, cls.__bases__, orig_vars)
+    return wrapper
+
+
+def python_2_unicode_compatible(klass):
+    """
+    A decorator that defines __unicode__ and __str__ methods under Python 2.
+    Under Python 3 it does nothing.
+
+    To support Python 2 and 3 with a single code base, define a __str__ method
+    returning text and apply this decorator to the class.
+    """
+    if PY2:
+        if '__str__' not in klass.__dict__:
+            raise ValueError("@python_2_unicode_compatible cannot be applied "
+                             "to %s because it doesn't define __str__()." %
+                             klass.__name__)
+        klass.__unicode__ = klass.__str__
+        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+    return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = []  # required for PEP 302 and PEP 451
+__package__ = __name__  # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+    for i, importer in enumerate(sys.meta_path):
+        # Here's some real nastiness: Another "instance" of the six module might
+        # be floating around. Therefore, we can't use isinstance() to check for
+        # the six meta path importer, since the other six instance will have
+        # inserted an importer with different class.
+        if (type(importer).__name__ == "_SixMetaPathImporter" and
+                importer.name == __name__):
+            del sys.meta_path[i]
+            break
+    del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/pkg_resources/_vendor/vendored.txt b/pkg_resources/_vendor/vendored.txt
new file mode 100644
index 0000000..9a94c5b
--- /dev/null
+++ b/pkg_resources/_vendor/vendored.txt
@@ -0,0 +1,4 @@
+packaging==16.8
+pyparsing==2.1.10
+six==1.10.0
+appdirs==1.4.0
diff --git a/pkg_resources/api_tests.txt b/pkg_resources/api_tests.txt
new file mode 100644
index 0000000..0a75170
--- /dev/null
+++ b/pkg_resources/api_tests.txt
@@ -0,0 +1,401 @@
+Pluggable Distributions of Python Software
+==========================================
+
+Distributions
+-------------
+
+A "Distribution" is a collection of files that represent a "Release" of a
+"Project" as of a particular point in time, denoted by a
+"Version"::
+
+    >>> import sys, pkg_resources
+    >>> from pkg_resources import Distribution
+    >>> Distribution(project_name="Foo", version="1.2")
+    Foo 1.2
+
+Distributions have a location, which can be a filename, URL, or really anything
+else you care to use::
+
+    >>> dist = Distribution(
+    ...     location="http://example.com/something",
+    ...     project_name="Bar", version="0.9"
+    ... )
+
+    >>> dist
+    Bar 0.9 (http://example.com/something)
+
+
+Distributions have various introspectable attributes::
+
+    >>> dist.location
+    'http://example.com/something'
+
+    >>> dist.project_name
+    'Bar'
+
+    >>> dist.version
+    '0.9'
+
+    >>> dist.py_version == sys.version[:3]
+    True
+
+    >>> print(dist.platform)
+    None
+
+Including various computed attributes::
+
+    >>> from pkg_resources import parse_version
+    >>> dist.parsed_version == parse_version(dist.version)
+    True
+
+    >>> dist.key    # case-insensitive form of the project name
+    'bar'
+
+Distributions are compared (and hashed) by version first::
+
+    >>> Distribution(version='1.0') == Distribution(version='1.0')
+    True
+    >>> Distribution(version='1.0') == Distribution(version='1.1')
+    False
+    >>> Distribution(version='1.0') <  Distribution(version='1.1')
+    True
+
+but also by project name (case-insensitive), platform, Python version,
+location, etc.::
+
+    >>> Distribution(project_name="Foo",version="1.0") == \
+    ... Distribution(project_name="Foo",version="1.0")
+    True
+
+    >>> Distribution(project_name="Foo",version="1.0") == \
+    ... Distribution(project_name="foo",version="1.0")
+    True
+
+    >>> Distribution(project_name="Foo",version="1.0") == \
+    ... Distribution(project_name="Foo",version="1.1")
+    False
+
+    >>> Distribution(project_name="Foo",py_version="2.3",version="1.0") == \
+    ... Distribution(project_name="Foo",py_version="2.4",version="1.0")
+    False
+
+    >>> Distribution(location="spam",version="1.0") == \
+    ... Distribution(location="spam",version="1.0")
+    True
+
+    >>> Distribution(location="spam",version="1.0") == \
+    ... Distribution(location="baz",version="1.0")
+    False
+
+
+
+Hash and compare distribution by prio/plat
+
+Get version from metadata
+provider capabilities
+egg_name()
+as_requirement()
+from_location, from_filename (w/path normalization)
+
+Releases may have zero or more "Requirements", which indicate
+what releases of another project the release requires in order to
+function.  A Requirement names the other project, expresses some criteria
+as to what releases of that project are acceptable, and lists any "Extras"
+that the requiring release may need from that project.  (An Extra is an
+optional feature of a Release, that can only be used if its additional
+Requirements are satisfied.)
+
+
+
+The Working Set
+---------------
+
+A collection of active distributions is called a Working Set.  Note that a
+Working Set can contain any importable distribution, not just pluggable ones.
+For example, the Python standard library is an importable distribution that
+will usually be part of the Working Set, even though it is not pluggable.
+Similarly, when you are doing development work on a project, the files you are
+editing are also a Distribution.  (And, with a little attention to the
+directory names used,  and including some additional metadata, such a
+"development distribution" can be made pluggable as well.)
+
+    >>> from pkg_resources import WorkingSet
+
+A working set's entries are the sys.path entries that correspond to the active
+distributions.  By default, the working set's entries are the items on
+``sys.path``::
+
+    >>> ws = WorkingSet()
+    >>> ws.entries == sys.path
+    True
+
+But you can also create an empty working set explicitly, and add distributions
+to it::
+
+    >>> ws = WorkingSet([])
+    >>> ws.add(dist)
+    >>> ws.entries
+    ['http://example.com/something']
+    >>> dist in ws
+    True
+    >>> Distribution('foo',version="") in ws
+    False
+
+And you can iterate over its distributions::
+
+    >>> list(ws)
+    [Bar 0.9 (http://example.com/something)]
+
+Adding the same distribution more than once is a no-op::
+
+    >>> ws.add(dist)
+    >>> list(ws)
+    [Bar 0.9 (http://example.com/something)]
+
+For that matter, adding multiple distributions for the same project also does
+nothing, because a working set can only hold one active distribution per
+project -- the first one added to it::
+
+    >>> ws.add(
+    ...     Distribution(
+    ...         'http://example.com/something', project_name="Bar",
+    ...         version="7.2"
+    ...     )
+    ... )
+    >>> list(ws)
+    [Bar 0.9 (http://example.com/something)]
+
+You can append a path entry to a working set using ``add_entry()``::
+
+    >>> ws.entries
+    ['http://example.com/something']
+    >>> ws.add_entry(pkg_resources.__file__)
+    >>> ws.entries
+    ['http://example.com/something', '...pkg_resources...']
+
+Multiple additions result in multiple entries, even if the entry is already in
+the working set (because ``sys.path`` can contain the same entry more than
+once)::
+
+    >>> ws.add_entry(pkg_resources.__file__)
+    >>> ws.entries
+    ['...example.com...', '...pkg_resources...', '...pkg_resources...']
+
+And you can specify the path entry a distribution was found under, using the
+optional second parameter to ``add()``::
+
+    >>> ws = WorkingSet([])
+    >>> ws.add(dist,"foo")
+    >>> ws.entries
+    ['foo']
+
+But even if a distribution is found under multiple path entries, it still only
+shows up once when iterating the working set::
+
+    >>> ws.add_entry(ws.entries[0])
+    >>> list(ws)
+    [Bar 0.9 (http://example.com/something)]
+
+You can ask a WorkingSet to ``find()`` a distribution matching a requirement::
+
+    >>> from pkg_resources import Requirement
+    >>> print(ws.find(Requirement.parse("Foo==1.0")))   # no match, return None
+    None
+
+    >>> ws.find(Requirement.parse("Bar==0.9"))  # match, return distribution
+    Bar 0.9 (http://example.com/something)
+
+Note that asking for a conflicting version of a distribution already in a
+working set triggers a ``pkg_resources.VersionConflict`` error::
+
+    >>> try:
+    ...     ws.find(Requirement.parse("Bar==1.0"))
+    ... except pkg_resources.VersionConflict as exc:
+    ...     print(str(exc))
+    ... else:
+    ...     raise AssertionError("VersionConflict was not raised")
+    (Bar 0.9 (http://example.com/something), Requirement.parse('Bar==1.0'))
+
+You can subscribe a callback function to receive notifications whenever a new
+distribution is added to a working set.  The callback is immediately invoked
+once for each existing distribution in the working set, and then is called
+again for new distributions added thereafter::
+
+    >>> def added(dist): print("Added %s" % dist)
+    >>> ws.subscribe(added)
+    Added Bar 0.9
+    >>> foo12 = Distribution(project_name="Foo", version="1.2", location="f12")
+    >>> ws.add(foo12)
+    Added Foo 1.2
+
+Note, however, that only the first distribution added for a given project name
+will trigger a callback, even during the initial ``subscribe()`` callback::
+
+    >>> foo14 = Distribution(project_name="Foo", version="1.4", location="f14")
+    >>> ws.add(foo14)   # no callback, because Foo 1.2 is already active
+
+    >>> ws = WorkingSet([])
+    >>> ws.add(foo12)
+    >>> ws.add(foo14)
+    >>> ws.subscribe(added)
+    Added Foo 1.2
+
+And adding a callback more than once has no effect, either::
+
+    >>> ws.subscribe(added)     # no callbacks
+
+    # and no double-callbacks on subsequent additions, either
+    >>> just_a_test = Distribution(project_name="JustATest", version="0.99")
+    >>> ws.add(just_a_test)
+    Added JustATest 0.99
+
+
+Finding Plugins
+---------------
+
+``WorkingSet`` objects can be used to figure out what plugins in an
+``Environment`` can be loaded without any resolution errors::
+
+    >>> from pkg_resources import Environment
+
+    >>> plugins = Environment([])   # normally, a list of plugin directories
+    >>> plugins.add(foo12)
+    >>> plugins.add(foo14)
+    >>> plugins.add(just_a_test)
+
+In the simplest case, we just get the newest version of each distribution in
+the plugin environment::
+
+    >>> ws = WorkingSet([])
+    >>> ws.find_plugins(plugins)
+    ([JustATest 0.99, Foo 1.4 (f14)], {})
+
+But if there's a problem with a version conflict or missing requirements, the
+method falls back to older versions, and the error info dict will contain an
+exception instance for each unloadable plugin::
+
+    >>> ws.add(foo12)   # this will conflict with Foo 1.4
+    >>> ws.find_plugins(plugins)
+    ([JustATest 0.99, Foo 1.2 (f12)], {Foo 1.4 (f14): VersionConflict(...)})
+
+But if you disallow fallbacks, the failed plugin will be skipped instead of
+trying older versions::
+
+    >>> ws.find_plugins(plugins, fallback=False)
+    ([JustATest 0.99], {Foo 1.4 (f14): VersionConflict(...)})
+
+
+
+Platform Compatibility Rules
+----------------------------
+
+On the Mac, there are potential compatibility issues for modules compiled
+on newer versions of Mac OS X than what the user is running. Additionally,
+Mac OS X will soon have two platforms to contend with: Intel and PowerPC.
+
+Basic equality works as on other platforms::
+
+    >>> from pkg_resources import compatible_platforms as cp
+    >>> reqd = 'macosx-10.4-ppc'
+    >>> cp(reqd, reqd)
+    True
+    >>> cp("win32", reqd)
+    False
+
+Distributions made on other machine types are not compatible::
+
+    >>> cp("macosx-10.4-i386", reqd)
+    False
+
+Distributions made on earlier versions of the OS are compatible, as
+long as they are from the same top-level version. The patchlevel version
+number does not matter::
+
+    >>> cp("macosx-10.4-ppc", reqd)
+    True
+    >>> cp("macosx-10.3-ppc", reqd)
+    True
+    >>> cp("macosx-10.5-ppc", reqd)
+    False
+    >>> cp("macosx-9.5-ppc", reqd)
+    False
+
+Backwards compatibility for packages made via earlier versions of
+setuptools is provided as well::
+
+    >>> cp("darwin-8.2.0-Power_Macintosh", reqd)
+    True
+    >>> cp("darwin-7.2.0-Power_Macintosh", reqd)
+    True
+    >>> cp("darwin-8.2.0-Power_Macintosh", "macosx-10.3-ppc")
+    False
+
+
+Environment Markers
+-------------------
+
+    >>> from pkg_resources import invalid_marker as im, evaluate_marker as em
+    >>> import os
+
+    >>> print(im("sys_platform"))
+    Invalid marker: 'sys_platform', parse error at ''
+
+    >>> print(im("sys_platform=="))
+    Invalid marker: 'sys_platform==', parse error at ''
+
+    >>> print(im("sys_platform=='win32'"))
+    False
+
+    >>> print(im("sys=='x'"))
+    Invalid marker: "sys=='x'", parse error at "sys=='x'"
+
+    >>> print(im("(extra)"))
+    Invalid marker: '(extra)', parse error at ')'
+
+    >>> print(im("(extra"))
+    Invalid marker: '(extra', parse error at ''
+
+    >>> print(im("os.open('foo')=='y'"))
+    Invalid marker: "os.open('foo')=='y'", parse error at 'os.open('
+
+    >>> print(im("'x'=='y' and os.open('foo')=='y'"))   # no short-circuit!
+    Invalid marker: "'x'=='y' and os.open('foo')=='y'", parse error at 'and os.o'
+
+    >>> print(im("'x'=='x' or os.open('foo')=='y'"))   # no short-circuit!
+    Invalid marker: "'x'=='x' or os.open('foo')=='y'", parse error at 'or os.op'
+
+    >>> print(im("'x' < 'y' < 'z'"))
+    Invalid marker: "'x' < 'y' < 'z'", parse error at "< 'z'"
+
+    >>> print(im("r'x'=='x'"))
+    Invalid marker: "r'x'=='x'", parse error at "r'x'=='x"
+
+    >>> print(im("'''x'''=='x'"))
+    Invalid marker: "'''x'''=='x'", parse error at "'x'''=='"
+
+    >>> print(im('"""x"""=="x"'))
+    Invalid marker: '"""x"""=="x"', parse error at '"x"""=="'
+
+    >>> print(im(r"x\n=='x'"))
+    Invalid marker: "x\\n=='x'", parse error at "x\\n=='x'"
+
+    >>> print(im("os.open=='y'"))
+    Invalid marker: "os.open=='y'", parse error at 'os.open='
+
+    >>> em("sys_platform=='win32'") == (sys.platform=='win32')
+    True
+
+    >>> em("python_version >= '2.7'")
+    True
+
+    >>> em("python_version > '2.6'")
+    True
+
+    >>> im("implementation_name=='cpython'")
+    False
+
+    >>> im("platform_python_implementation=='CPython'")
+    False
+
+    >>> im("implementation_version=='3.5.1'")
+    False
diff --git a/pkg_resources/extern/__init__.py b/pkg_resources/extern/__init__.py
new file mode 100644
index 0000000..b4156fe
--- /dev/null
+++ b/pkg_resources/extern/__init__.py
@@ -0,0 +1,73 @@
+import sys
+
+
+class VendorImporter:
+    """
+    A PEP 302 meta path importer for finding optionally-vendored
+    or otherwise naturally-installed packages from root_name.
+    """
+
+    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
+        # root_name: the package hosting this importer (e.g.
+        # 'pkg_resources.extern'); vendored_names: top-level names this
+        # importer is willing to resolve; vendor_pkg: where the vendored
+        # copies live -- by convention '<root>._vendor' mirrors
+        # '<root>.extern'.
+        self.root_name = root_name
+        self.vendored_names = set(vendored_names)
+        self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
+
+    @property
+    def search_path(self):
+        """
+        Search first the vendor package then as a natural package.
+        """
+        # The empty prefix means "import the bare name", i.e. fall back to
+        # whatever copy is naturally importable from sys.path.
+        yield self.vendor_pkg + '.'
+        yield ''
+
+    def find_module(self, fullname, path=None):
+        """
+        Return self when fullname starts with root_name and the
+        target module is one vendored through this importer.
+        """
+        root, base, target = fullname.partition(self.root_name + '.')
+        # partition() leaves 'root' empty only when fullname begins with the
+        # 'root_name.' prefix; a non-empty 'root' means an unrelated import.
+        if root:
+            return
+        if not any(map(target.startswith, self.vendored_names)):
+            return
+        return self
+
+    def load_module(self, fullname):
+        """
+        Iterate over the search path to locate and load fullname.
+        """
+        root, base, target = fullname.partition(self.root_name + '.')
+        for prefix in self.search_path:
+            try:
+                extant = prefix + target
+                __import__(extant)
+                mod = sys.modules[extant]
+                sys.modules[fullname] = mod
+                # mysterious hack:
+                # Remove the reference to the extant package/module
+                # on later Python versions to cause relative imports
+                # in the vendor package to resolve the same modules
+                # as those going through this importer.
+                if sys.version_info > (3, 3):
+                    del sys.modules[extant]
+                return mod
+            except ImportError:
+                pass
+        else:
+            # reached only when every prefix (vendored and natural) failed
+            raise ImportError(
+                "The '{target}' package is required; "
+                "normally this is bundled with this package so if you get "
+                "this warning, consult the packager of your "
+                "distribution.".format(**locals())
+            )
+
+    def install(self):
+        """
+        Install this importer into sys.meta_path if not already present.
+        """
+        if self not in sys.meta_path:
+            sys.meta_path.append(self)
+
+
+names = 'packaging', 'pyparsing', 'six', 'appdirs'
+VendorImporter(__name__, names).install()
diff --git a/pkg_resources/py31compat.py b/pkg_resources/py31compat.py
new file mode 100644
index 0000000..331a51b
--- /dev/null
+++ b/pkg_resources/py31compat.py
@@ -0,0 +1,22 @@
+import os
+import errno
+import sys
+
+
+def _makedirs_31(path, exist_ok=False):
+    try:
+        os.makedirs(path)
+    except OSError as exc:
+        if not exist_ok or exc.errno != errno.EEXIST:
+            raise
+
+
+# rely on compatibility behavior until mode considerations
+#  and exists_ok considerations are disentangled.
+# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663
+# On interpreters in the version windows below, the shim above is used in
+# place of the stdlib os.makedirs (see the linked discussion for details).
+needs_makedirs = (
+    sys.version_info < (3, 2, 5) or
+    (3, 3) <= sys.version_info < (3, 3, 6) or
+    (3, 4) <= sys.version_info < (3, 4, 1)
+)
+makedirs = _makedirs_31 if needs_makedirs else os.makedirs
diff --git a/pkg_resources/tests/__init__.py b/pkg_resources/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg_resources/tests/__init__.py
diff --git a/pkg_resources/tests/test_find_distributions.py b/pkg_resources/tests/test_find_distributions.py
new file mode 100644
index 0000000..d735c59
--- /dev/null
+++ b/pkg_resources/tests/test_find_distributions.py
@@ -0,0 +1,66 @@
+import subprocess
+import sys
+
+import pytest
+import pkg_resources
+
+# Minimal setup.py source used to build the example distribution installed
+# by the tests below; .lstrip() drops the leading newline of the literal.
+SETUP_TEMPLATE = """
+import setuptools
+setuptools.setup(
+    name="my-test-package",
+    version="1.0",
+    zip_safe=True,
+)
+""".lstrip()
+
+
+class TestFindDistributions:
+    """Tests for pkg_resources.find_distributions over egg layouts."""
+
+    @pytest.fixture
+    def target_dir(self, tmpdir):
+        target_dir = tmpdir.mkdir('target')
+        # place a .egg named directory in the target that is not an egg:
+        target_dir.mkdir('not.an.egg')
+        return str(target_dir)
+
+    @pytest.fixture
+    def project_dir(self, tmpdir):
+        # a minimal installable project built from SETUP_TEMPLATE
+        project_dir = tmpdir.mkdir('my-test-package')
+        (project_dir / "setup.py").write(SETUP_TEMPLATE)
+        return str(project_dir)
+
+    def test_non_egg_dir_named_egg(self, target_dir):
+        dists = pkg_resources.find_distributions(target_dir)
+        assert not list(dists)
+
+    def test_standalone_egg_directory(self, project_dir, target_dir):
+        # install this distro as an unpacked egg:
+        args = [
+            sys.executable,
+            '-c', 'from setuptools.command.easy_install import main; main()',
+            # easy_install flags: -m (multi-version), -N (no deps),
+            # -x (exclude scripts) -- TODO confirm against easy_install docs
+            '-mNx',
+            '-d', target_dir,
+            '--always-unzip',
+            project_dir,
+        ]
+        subprocess.check_call(args)
+        dists = pkg_resources.find_distributions(target_dir)
+        assert [dist.project_name for dist in dists] == ['my-test-package']
+        # with only=True the egg inside the directory is not reported
+        dists = pkg_resources.find_distributions(target_dir, only=True)
+        assert not list(dists)
+
+    def test_zipped_egg(self, project_dir, target_dir):
+        # install this distro as a zipped egg (--zip-ok):
+        args = [
+            sys.executable,
+            '-c', 'from setuptools.command.easy_install import main; main()',
+            '-mNx',
+            '-d', target_dir,
+            '--zip-ok',
+            project_dir,
+        ]
+        subprocess.check_call(args)
+        dists = pkg_resources.find_distributions(target_dir)
+        assert [dist.project_name for dist in dists] == ['my-test-package']
+        # with only=True the egg inside the directory is not reported
+        dists = pkg_resources.find_distributions(target_dir, only=True)
+        assert not list(dists)
diff --git a/pkg_resources/tests/test_markers.py b/pkg_resources/tests/test_markers.py
new file mode 100644
index 0000000..15a3b49
--- /dev/null
+++ b/pkg_resources/tests/test_markers.py
@@ -0,0 +1,8 @@
+import mock
+
+from pkg_resources import evaluate_marker
+
+
+@mock.patch('platform.python_version', return_value='2.7.10')
+def test_ordering(python_version_mock):
+    # '2.7.10' exceeds '2.7.3' only under numeric version comparison;
+    # a plain string comparison would invert the result.
+    assert evaluate_marker("python_full_version > '2.7.3'") is True
diff --git a/pkg_resources/tests/test_pkg_resources.py b/pkg_resources/tests/test_pkg_resources.py
new file mode 100644
index 0000000..7442b79
--- /dev/null
+++ b/pkg_resources/tests/test_pkg_resources.py
@@ -0,0 +1,209 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import sys
+import tempfile
+import os
+import zipfile
+import datetime
+import time
+import subprocess
+import stat
+import distutils.dist
+import distutils.command.install_egg_info
+
+from pkg_resources.extern.six.moves import map
+
+import pytest
+
+import pkg_resources
+
+try:
+    unicode
+except NameError:
+    # Python 3: alias the Python 2 text type name to str
+    unicode = str
+
+
def timestamp(dt):
    """
    Return a timestamp for a local, naive datetime instance.
    """
    if hasattr(dt, 'timestamp'):
        return dt.timestamp()
    # Python 3.2 and earlier lack datetime.timestamp()
    return time.mktime(dt.timetuple())
+
+
+class EggRemover(unicode):
+    # A path string that, when called, removes itself from sys.path and
+    # deletes the file or entry it names; used as a class-level finalizer.
+    def __call__(self):
+        if self in sys.path:
+            sys.path.remove(self)
+        if os.path.exists(self):
+            os.remove(self)
+
+
+class TestZipProvider(object):
+    # paths registered here are removed from sys.path / disk at teardown
+    finalizers = []
+
+    ref_time = datetime.datetime(2013, 5, 12, 13, 25, 0)
+    "A reference time for a file modification"
+
+    @classmethod
+    def setup_class(cls):
+        "create a zip egg and add it to sys.path"
+        # the egg is a flat zip holding a module and a data file, plus a
+        # subdirectory containing a second module and data file
+        egg = tempfile.NamedTemporaryFile(suffix='.egg', delete=False)
+        zip_egg = zipfile.ZipFile(egg, 'w')
+        zip_info = zipfile.ZipInfo()
+        zip_info.filename = 'mod.py'
+        zip_info.date_time = cls.ref_time.timetuple()
+        zip_egg.writestr(zip_info, 'x = 3\n')
+        zip_info = zipfile.ZipInfo()
+        zip_info.filename = 'data.dat'
+        zip_info.date_time = cls.ref_time.timetuple()
+        zip_egg.writestr(zip_info, 'hello, world!')
+        zip_info = zipfile.ZipInfo()
+        zip_info.filename = 'subdir/mod2.py'
+        zip_info.date_time = cls.ref_time.timetuple()
+        zip_egg.writestr(zip_info, 'x = 6\n')
+        zip_info = zipfile.ZipInfo()
+        zip_info.filename = 'subdir/data2.dat'
+        zip_info.date_time = cls.ref_time.timetuple()
+        zip_egg.writestr(zip_info, 'goodbye, world!')
+        zip_egg.close()
+        egg.close()
+
+        # putting the zip (and a directory inside it) on sys.path makes
+        # 'mod' and 'mod2' importable
+        sys.path.append(egg.name)
+        subdir = os.path.join(egg.name, 'subdir')
+        sys.path.append(subdir)
+        cls.finalizers.append(EggRemover(subdir))
+        cls.finalizers.append(EggRemover(egg.name))
+
+    @classmethod
+    def teardown_class(cls):
+        for finalizer in cls.finalizers:
+            finalizer()
+
+    def test_resource_listdir(self):
+        import mod
+        zp = pkg_resources.ZipProvider(mod)
+
+        expected_root = ['data.dat', 'mod.py', 'subdir']
+        assert sorted(zp.resource_listdir('')) == expected_root
+        assert sorted(zp.resource_listdir('/')) == expected_root
+
+        expected_subdir = ['data2.dat', 'mod2.py']
+        assert sorted(zp.resource_listdir('subdir')) == expected_subdir
+        assert sorted(zp.resource_listdir('subdir/')) == expected_subdir
+
+        # a missing directory yields an empty listing, not an error
+        assert zp.resource_listdir('nonexistent') == []
+        assert zp.resource_listdir('nonexistent/') == []
+
+        # mod2 lives in 'subdir', so its provider is rooted there
+        import mod2
+        zp2 = pkg_resources.ZipProvider(mod2)
+
+        assert sorted(zp2.resource_listdir('')) == expected_subdir
+        assert sorted(zp2.resource_listdir('/')) == expected_subdir
+
+        assert zp2.resource_listdir('subdir') == []
+        assert zp2.resource_listdir('subdir/') == []
+
+    def test_resource_filename_rewrites_on_change(self):
+        """
+        If a previous call to get_resource_filename has saved the file, but
+        the file has been subsequently mutated with a different file of the
+        same size and modification time, a subsequent call to
+        get_resource_filename should replace the mutated copy with the
+        pristine data from the zip (as the final assertion verifies).
+        """
+        import mod
+        manager = pkg_resources.ResourceManager()
+        zp = pkg_resources.ZipProvider(mod)
+        filename = zp.get_resource_filename(manager, 'data.dat')
+        actual = datetime.datetime.fromtimestamp(os.stat(filename).st_mtime)
+        assert actual == self.ref_time
+        # overwrite the extracted copy, keeping size (13 bytes) and mtime equal
+        f = open(filename, 'w')
+        f.write('hello, world?')
+        f.close()
+        ts = timestamp(self.ref_time)
+        os.utime(filename, (ts, ts))
+        filename = zp.get_resource_filename(manager, 'data.dat')
+        with open(filename) as f:
+            assert f.read() == 'hello, world!'
+        manager.cleanup_resources()
+
+
+class TestResourceManager(object):
+    def test_get_cache_path(self):
+        # get_cache_path must hand back a text path (str/unicode)
+        mgr = pkg_resources.ResourceManager()
+        path = mgr.get_cache_path('foo')
+        type_ = str(type(path))
+        message = "Unexpected type from get_cache_path: " + type_
+        assert isinstance(path, (unicode, str)), message
+
+
+class TestIndependence:
+    """
+    Tests to ensure that pkg_resources runs independently from setuptools.
+    """
+
+    def test_setuptools_not_imported(self):
+        """
+        In a separate Python environment, import pkg_resources and assert
+        that action doesn't cause setuptools to be imported.
+        """
+        lines = (
+            'import pkg_resources',
+            'import sys',
+            (
+                'assert "setuptools" not in sys.modules, '
+                '"setuptools was imported"'
+            ),
+        )
+        # join into a single '-c' program; a fresh interpreter guarantees a
+        # clean sys.modules to test against
+        cmd = [sys.executable, '-c', '; '.join(lines)]
+        subprocess.check_call(cmd)
+
+
+class TestDeepVersionLookupDistutils(object):
+    @pytest.fixture
+    def env(self, tmpdir):
+        """
+        Create a package environment, similar to a virtualenv,
+        in which packages are installed.
+        """
+
+        class Environment(str):
+            pass
+
+        env = Environment(tmpdir)
+        tmpdir.chmod(stat.S_IRWXU)
+        subs = 'home', 'lib', 'scripts', 'data', 'egg-base'
+        env.paths = dict(
+            (dirname, str(tmpdir / dirname))
+            for dirname in subs
+        )
+        # materialize each sub-directory on disk
+        list(map(os.mkdir, env.paths.values()))
+        return env
+
+    def create_foo_pkg(self, env, version):
+        """
+        Create a foo package installed (distutils-style) to env.paths['lib']
+        as version.
+        """
+        # non-ASCII long_description exercises unicode metadata handling
+        ld = "This package has unicode metadata! ❄"
+        attrs = dict(name='foo', version=version, long_description=ld)
+        dist = distutils.dist.Distribution(attrs)
+        iei_cmd = distutils.command.install_egg_info.install_egg_info(dist)
+        iei_cmd.initialize_options()
+        iei_cmd.install_dir = env.paths['lib']
+        iei_cmd.finalize_options()
+        iei_cmd.run()
+
+    def test_version_resolved_from_egg_info(self, env):
+        # a version string with dev and local segments
+        version = '1.11.0.dev0+2329eae'
+        self.create_foo_pkg(env, version)
+
+        # this requirement parsing will raise a VersionConflict unless the
+        # .egg-info file is parsed (see #419 on BitBucket)
+        req = pkg_resources.Requirement.parse('foo>=1.9')
+        dist = pkg_resources.WorkingSet([env.paths['lib']]).find(req)
+        assert dist.version == version
diff --git a/pkg_resources/tests/test_resources.py b/pkg_resources/tests/test_resources.py
new file mode 100644
index 0000000..05f35ad
--- /dev/null
+++ b/pkg_resources/tests/test_resources.py
@@ -0,0 +1,834 @@
+from __future__ import unicode_literals
+
+import os
+import sys
+import string
+import platform
+import itertools
+
+from pkg_resources.extern.six.moves import map
+
+import pytest
+from pkg_resources.extern import packaging
+
+import pkg_resources
+from pkg_resources import (
+    parse_requirements, VersionConflict, parse_version,
+    Distribution, EntryPoint, Requirement, safe_version, safe_name,
+    WorkingSet)
+
+
+# from Python 3.6 docs.
+def pairwise(iterable):
+    """Return successive overlapping pairs: s -> (s0,s1), (s1,s2), ..."""
+    # tee yields two independent iterators; advancing b once offsets them.
+    a, b = itertools.tee(iterable)
+    next(b, None)
+    return zip(a, b)
+
+
+class Metadata(pkg_resources.EmptyProvider):
+    """Mock object to return metadata as if from an on-disk distribution"""
+
+    def __init__(self, *pairs):
+        # pairs: (metadata_name, metadata_text) tuples.
+        self.metadata = dict(pairs)
+
+    def has_metadata(self, name):
+        return name in self.metadata
+
+    def get_metadata(self, name):
+        return self.metadata[name]
+
+    def get_metadata_lines(self, name):
+        # Presumably yields stripped, non-blank lines — see
+        # pkg_resources.yield_lines for the exact filtering.
+        return pkg_resources.yield_lines(self.get_metadata(name))
+
+
+# Shorthand used throughout these tests to build a Distribution from an
+# egg / egg-info filename.
+dist_from_fn = pkg_resources.Distribution.from_filename
+
+
+class TestDistro:
+    """
+    Tests for pkg_resources Environment/Distribution behavior: filename
+    parsing, version ordering, requirement resolution, and environment
+    marker evaluation.
+    """
+
+    def testCollection(self):
+        # empty path should produce no distributions
+        ad = pkg_resources.Environment([], platform=None, python=None)
+        assert list(ad) == []
+        assert ad['FooPkg'] == []
+        ad.add(dist_from_fn("FooPkg-1.3_1.egg"))
+        ad.add(dist_from_fn("FooPkg-1.4-py2.4-win32.egg"))
+        ad.add(dist_from_fn("FooPkg-1.2-py2.4.egg"))
+
+        # Name is in there now
+        assert ad['FooPkg']
+        # But only 1 package
+        assert list(ad) == ['foopkg']
+
+        # Distributions sort by version
+        expected = ['1.4', '1.3-1', '1.2']
+        assert [dist.version for dist in ad['FooPkg']] == expected
+
+        # Removing a distribution leaves sequence alone
+        ad.remove(ad['FooPkg'][1])
+        assert [dist.version for dist in ad['FooPkg']] == ['1.4', '1.2']
+
+        # And inserting adds them in order
+        ad.add(dist_from_fn("FooPkg-1.9.egg"))
+        assert [dist.version for dist in ad['FooPkg']] == ['1.9', '1.4', '1.2']
+
+        ws = WorkingSet([])
+        foo12 = dist_from_fn("FooPkg-1.2-py2.4.egg")
+        foo14 = dist_from_fn("FooPkg-1.4-py2.4-win32.egg")
+        req, = parse_requirements("FooPkg>=1.3")
+
+        # Nominal case: no distros on path, should yield all applicable
+        assert ad.best_match(req, ws).version == '1.9'
+        # If a matching distro is already installed, should return only that
+        ws.add(foo14)
+        assert ad.best_match(req, ws).version == '1.4'
+
+        # If the first matching distro is unsuitable, it's a version conflict
+        ws = WorkingSet([])
+        ws.add(foo12)
+        ws.add(foo14)
+        with pytest.raises(VersionConflict):
+            ad.best_match(req, ws)
+
+        # If more than one match on the path, the first one takes precedence
+        ws = WorkingSet([])
+        ws.add(foo14)
+        ws.add(foo12)
+        ws.add(foo14)
+        assert ad.best_match(req, ws).version == '1.4'
+
+    # Shared assertions for the canonical FooPkg 1.3-1 / py2.4 / win32 dist.
+    def checkFooPkg(self, d):
+        assert d.project_name == "FooPkg"
+        assert d.key == "foopkg"
+        assert d.version == "1.3.post1"
+        assert d.py_version == "2.4"
+        assert d.platform == "win32"
+        assert d.parsed_version == parse_version("1.3-1")
+
+    def testDistroBasics(self):
+        d = Distribution(
+            "/some/path",
+            project_name="FooPkg",
+            version="1.3-1",
+            py_version="2.4",
+            platform="win32",
+        )
+        self.checkFooPkg(d)
+
+        # Defaults: current interpreter's py_version, no platform.
+        d = Distribution("/some/path")
+        assert d.py_version == sys.version[:3]
+        assert d.platform is None
+
+    def testDistroParse(self):
+        d = dist_from_fn("FooPkg-1.3.post1-py2.4-win32.egg")
+        self.checkFooPkg(d)
+        d = dist_from_fn("FooPkg-1.3.post1-py2.4-win32.egg-info")
+        self.checkFooPkg(d)
+
+    def testDistroMetadata(self):
+        # Version comes from PKG-INFO metadata rather than the filename.
+        d = Distribution(
+            "/some/path", project_name="FooPkg",
+            py_version="2.4", platform="win32",
+            metadata=Metadata(
+                ('PKG-INFO', "Metadata-Version: 1.0\nVersion: 1.3-1\n")
+            ),
+        )
+        self.checkFooPkg(d)
+
+    # Helper: build a Distribution whose requirements come from depends.txt.
+    def distRequires(self, txt):
+        return Distribution("/foo", metadata=Metadata(('depends.txt', txt)))
+
+    # Helper: dist.requires(extras) must equal the parsed requirement list.
+    def checkRequires(self, dist, txt, extras=()):
+        assert list(dist.requires(extras)) == list(parse_requirements(txt))
+
+    def testDistroDependsSimple(self):
+        for v in "Twisted>=1.5", "Twisted>=1.5\nZConfig>=2.0":
+            self.checkRequires(self.distRequires(v), v)
+
+    def testResolve(self):
+        ad = pkg_resources.Environment([])
+        ws = WorkingSet([])
+        # Resolving no requirements -> nothing to install
+        assert list(ws.resolve([], ad)) == []
+        # Request something not in the collection -> DistributionNotFound
+        with pytest.raises(pkg_resources.DistributionNotFound):
+            ws.resolve(parse_requirements("Foo"), ad)
+
+        Foo = Distribution.from_filename(
+            "/foo_dir/Foo-1.2.egg",
+            metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0"))
+        )
+        ad.add(Foo)
+        ad.add(Distribution.from_filename("Foo-0.9.egg"))
+
+        # Request thing(s) that are available -> list to activate
+        for i in range(3):
+            targets = list(ws.resolve(parse_requirements("Foo"), ad))
+            assert targets == [Foo]
+            list(map(ws.add, targets))
+        with pytest.raises(VersionConflict):
+            ws.resolve(parse_requirements("Foo==0.9"), ad)
+        ws = WorkingSet([])  # reset
+
+        # Request an extra that causes an unresolved dependency for "Baz"
+        with pytest.raises(pkg_resources.DistributionNotFound):
+            ws.resolve(parse_requirements("Foo[bar]"), ad)
+        Baz = Distribution.from_filename(
+            "/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo"))
+        )
+        ad.add(Baz)
+
+        # Activation list now includes resolved dependency
+        assert (
+            list(ws.resolve(parse_requirements("Foo[bar]"), ad))
+            == [Foo, Baz]
+        )
+        # Requests for conflicting versions produce VersionConflict
+        with pytest.raises(VersionConflict) as vc:
+            ws.resolve(parse_requirements("Foo==1.2\nFoo!=1.2"), ad)
+
+        msg = 'Foo 0.9 is installed but Foo==1.2 is required'
+        assert vc.value.report() == msg
+
+    def test_environment_marker_evaluation_negative(self):
+        """Environment markers are evaluated at resolution time."""
+        ad = pkg_resources.Environment([])
+        ws = WorkingSet([])
+        # Marker is false for any supported interpreter, so the requirement
+        # is skipped entirely instead of raising DistributionNotFound.
+        res = ws.resolve(parse_requirements("Foo;python_version<'2'"), ad)
+        assert list(res) == []
+
+    def test_environment_marker_evaluation_positive(self):
+        ad = pkg_resources.Environment([])
+        ws = WorkingSet([])
+        Foo = Distribution.from_filename("/foo_dir/Foo-1.2.dist-info")
+        ad.add(Foo)
+        res = ws.resolve(parse_requirements("Foo;python_version>='2'"), ad)
+        assert list(res) == [Foo]
+
+    def test_environment_marker_evaluation_called(self):
+        """
+        If one package foo requires bar without any extras,
+        markers should pass for bar without extras.
+        """
+        parent_req, = parse_requirements("foo")
+        req, = parse_requirements("bar;python_version>='2'")
+        req_extras = pkg_resources._ReqExtras({req: parent_req.extras})
+        assert req_extras.markers_pass(req)
+
+        # Same with an explicit empty extras list on the parent.
+        parent_req, = parse_requirements("foo[]")
+        req, = parse_requirements("bar;python_version>='2'")
+        req_extras = pkg_resources._ReqExtras({req: parent_req.extras})
+        assert req_extras.markers_pass(req)
+
+    def test_marker_evaluation_with_extras(self):
+        """Extras are also evaluated as markers at resolution time."""
+        ad = pkg_resources.Environment([])
+        ws = WorkingSet([])
+        Foo = Distribution.from_filename(
+            "/foo_dir/Foo-1.2.dist-info",
+            metadata=Metadata(("METADATA", "Provides-Extra: baz\n"
+                               "Requires-Dist: quux; extra=='baz'"))
+        )
+        ad.add(Foo)
+        # Without the extra, quux must not be pulled in.
+        assert list(ws.resolve(parse_requirements("Foo"), ad)) == [Foo]
+        quux = Distribution.from_filename("/foo_dir/quux-1.0.dist-info")
+        ad.add(quux)
+        res = list(ws.resolve(parse_requirements("Foo[baz]"), ad))
+        assert res == [Foo, quux]
+
+    def test_marker_evaluation_with_extras_normlized(self):
+        """Extras are also evaluated as markers at resolution time."""
+        # Same as above, but the extra name contains a dash and must be
+        # normalized consistently between Provides-Extra and the request.
+        ad = pkg_resources.Environment([])
+        ws = WorkingSet([])
+        Foo = Distribution.from_filename(
+            "/foo_dir/Foo-1.2.dist-info",
+            metadata=Metadata(("METADATA", "Provides-Extra: baz-lightyear\n"
+                               "Requires-Dist: quux; extra=='baz-lightyear'"))
+        )
+        ad.add(Foo)
+        assert list(ws.resolve(parse_requirements("Foo"), ad)) == [Foo]
+        quux = Distribution.from_filename("/foo_dir/quux-1.0.dist-info")
+        ad.add(quux)
+        res = list(ws.resolve(parse_requirements("Foo[baz-lightyear]"), ad))
+        assert res == [Foo, quux]
+
+    def test_marker_evaluation_with_multiple_extras(self):
+        ad = pkg_resources.Environment([])
+        ws = WorkingSet([])
+        Foo = Distribution.from_filename(
+            "/foo_dir/Foo-1.2.dist-info",
+            metadata=Metadata(("METADATA", "Provides-Extra: baz\n"
+                               "Requires-Dist: quux; extra=='baz'\n"
+                               "Provides-Extra: bar\n"
+                               "Requires-Dist: fred; extra=='bar'\n"))
+        )
+        ad.add(Foo)
+        quux = Distribution.from_filename("/foo_dir/quux-1.0.dist-info")
+        ad.add(quux)
+        fred = Distribution.from_filename("/foo_dir/fred-0.1.dist-info")
+        ad.add(fred)
+        res = list(ws.resolve(parse_requirements("Foo[baz,bar]"), ad))
+        # Order of the two extra-deps is not pinned; compare sorted.
+        assert sorted(res) == [fred, quux, Foo]
+
+    def test_marker_evaluation_with_extras_loop(self):
+        # a -> c[a] -> b -> c[b] -> foo: extras that re-enter the same
+        # distribution must not loop forever.
+        ad = pkg_resources.Environment([])
+        ws = WorkingSet([])
+        a = Distribution.from_filename(
+            "/foo_dir/a-0.2.dist-info",
+            metadata=Metadata(("METADATA", "Requires-Dist: c[a]"))
+        )
+        b = Distribution.from_filename(
+            "/foo_dir/b-0.3.dist-info",
+            metadata=Metadata(("METADATA", "Requires-Dist: c[b]"))
+        )
+        c = Distribution.from_filename(
+            "/foo_dir/c-1.0.dist-info",
+            metadata=Metadata(("METADATA", "Provides-Extra: a\n"
+                               "Requires-Dist: b;extra=='a'\n"
+                               "Provides-Extra: b\n"
+                               "Requires-Dist: foo;extra=='b'"))
+        )
+        foo = Distribution.from_filename("/foo_dir/foo-0.1.dist-info")
+        for dist in (a, b, c, foo):
+            ad.add(dist)
+        res = list(ws.resolve(parse_requirements("a"), ad))
+        assert res == [a, c, b, foo]
+
+    def testDistroDependsOptions(self):
+        d = self.distRequires("""
+            Twisted>=1.5
+            [docgen]
+            ZConfig>=2.0
+            docutils>=0.3
+            [fastcgi]
+            fcgiapp>=0.1""")
+        self.checkRequires(d, "Twisted>=1.5")
+        self.checkRequires(
+            d, "Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"]
+        )
+        self.checkRequires(
+            d, "Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"]
+        )
+        self.checkRequires(
+            d, "Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(),
+            ["docgen", "fastcgi"]
+        )
+        # Extras order in the request does not affect the result order.
+        self.checkRequires(
+            d, "Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(),
+            ["fastcgi", "docgen"]
+        )
+        with pytest.raises(pkg_resources.UnknownExtra):
+            d.requires(["foo"])
+
+
+class TestWorkingSet:
+    """Tests for WorkingSet conflict detection and reporting."""
+
+    def test_find_conflicting(self):
+        ws = WorkingSet([])
+        Foo = Distribution.from_filename("/foo_dir/Foo-1.2.egg")
+        ws.add(Foo)
+
+        # create a requirement that conflicts with Foo 1.2
+        req = next(parse_requirements("Foo<1.2"))
+
+        with pytest.raises(VersionConflict) as vc:
+            ws.find(req)
+
+        msg = 'Foo 1.2 is installed but Foo<1.2 is required'
+        assert vc.value.report() == msg
+
+    def test_resolve_conflicts_with_prior(self):
+        """
+        A ContextualVersionConflict should be raised when a requirement
+        conflicts with a prior requirement for a different package.
+        """
+        # Create installation where Foo depends on Baz 1.0 and Bar depends on
+        # Baz 2.0.
+        ws = WorkingSet([])
+        md = Metadata(('depends.txt', "Baz==1.0"))
+        Foo = Distribution.from_filename("/foo_dir/Foo-1.0.egg", metadata=md)
+        ws.add(Foo)
+        md = Metadata(('depends.txt', "Baz==2.0"))
+        Bar = Distribution.from_filename("/foo_dir/Bar-1.0.egg", metadata=md)
+        ws.add(Bar)
+        Baz = Distribution.from_filename("/foo_dir/Baz-1.0.egg")
+        ws.add(Baz)
+        Baz = Distribution.from_filename("/foo_dir/Baz-2.0.egg")
+        ws.add(Baz)
+
+        with pytest.raises(VersionConflict) as vc:
+            ws.resolve(parse_requirements("Foo\nBar\n"))
+
+        # The report names the requirer(s) of the conflicting version.
+        msg = "Baz 1.0 is installed but Baz==2.0 is required by "
+        msg += repr(set(['Bar']))
+        assert vc.value.report() == msg
+
+
+class TestEntryPoints:
+    """Tests for EntryPoint construction, spec parsing, and group/map APIs."""
+
+    # Shared assertions for the canonical 'foo' entry point used below.
+    def assertfields(self, ep):
+        assert ep.name == "foo"
+        assert ep.module_name == "pkg_resources.tests.test_resources"
+        assert ep.attrs == ("TestEntryPoints",)
+        assert ep.extras == ("x",)
+        assert ep.load() is TestEntryPoints
+        expect = "foo = pkg_resources.tests.test_resources:TestEntryPoints [x]"
+        assert str(ep) == expect
+
+    def setup_method(self, method):
+        # Distribution declaring an '[x]' extra, required by the '[x]' in
+        # the entry point specs exercised below.
+        self.dist = Distribution.from_filename(
+            "FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt', '[x]')))
+
+    def testBasics(self):
+        ep = EntryPoint(
+            "foo", "pkg_resources.tests.test_resources", ["TestEntryPoints"],
+            ["x"], self.dist
+        )
+        self.assertfields(ep)
+
+    def testParse(self):
+        s = "foo = pkg_resources.tests.test_resources:TestEntryPoints [x]"
+        ep = EntryPoint.parse(s, self.dist)
+        self.assertfields(ep)
+
+        # Extra names are normalized to lowercase; attrs may be empty.
+        ep = EntryPoint.parse("bar baz=  spammity[PING]")
+        assert ep.name == "bar baz"
+        assert ep.module_name == "spammity"
+        assert ep.attrs == ()
+        assert ep.extras == ("ping",)
+
+        ep = EntryPoint.parse(" fizzly =  wocka:foo")
+        assert ep.name == "fizzly"
+        assert ep.module_name == "wocka"
+        assert ep.attrs == ("foo",)
+        assert ep.extras == ()
+
+        # plus in the name
+        spec = "html+mako = mako.ext.pygmentplugin:MakoHtmlLexer"
+        ep = EntryPoint.parse(spec)
+        assert ep.name == 'html+mako'
+
+    # Malformed specs that EntryPoint.parse must reject.
+    reject_specs = "foo", "x=a:b:c", "q=x/na", "fez=pish:tush-z", "x=f[a]>2"
+
+    @pytest.mark.parametrize("reject_spec", reject_specs)
+    def test_reject_spec(self, reject_spec):
+        with pytest.raises(ValueError):
+            EntryPoint.parse(reject_spec)
+
+    def test_printable_name(self):
+        """
+        Allow any printable character in the name.
+        """
+        # Create a name with all printable characters; strip the whitespace.
+        name = string.printable.strip()
+        spec = "{name} = module:attr".format(**locals())
+        ep = EntryPoint.parse(spec)
+        assert ep.name == name
+
+    # Helper: parsed group map m must match submap_expect entry-for-entry.
+    def checkSubMap(self, m):
+        assert len(m) == len(self.submap_expect)
+        for key, ep in self.submap_expect.items():
+            assert m.get(key).name == ep.name
+            assert m.get(key).module_name == ep.module_name
+            assert sorted(m.get(key).attrs) == sorted(ep.attrs)
+            assert sorted(m.get(key).extras) == sorted(ep.extras)
+
+    # Expected parse of submap_str below.
+    submap_expect = dict(
+        feature1=EntryPoint('feature1', 'somemodule', ['somefunction']),
+        feature2=EntryPoint(
+            'feature2', 'another.module', ['SomeClass'], ['extra1', 'extra2']),
+        feature3=EntryPoint('feature3', 'this.module', extras=['something'])
+    )
+    # Raw entry-points text: comments and indentation must be tolerated.
+    submap_str = """
+            # define features for blah blah
+            feature1 = somemodule:somefunction
+            feature2 = another.module:SomeClass [extra1,extra2]
+            feature3 = this.module [something]
+    """
+
+    def testParseList(self):
+        self.checkSubMap(EntryPoint.parse_group("xyz", self.submap_str))
+        # Invalid group name and duplicate entry names are rejected.
+        with pytest.raises(ValueError):
+            EntryPoint.parse_group("x a", "foo=bar")
+        with pytest.raises(ValueError):
+            EntryPoint.parse_group("x", ["foo=baz", "foo=bar"])
+
+    def testParseMap(self):
+        m = EntryPoint.parse_map({'xyz': self.submap_str})
+        self.checkSubMap(m['xyz'])
+        assert list(m.keys()) == ['xyz']
+        m = EntryPoint.parse_map("[xyz]\n" + self.submap_str)
+        self.checkSubMap(m['xyz'])
+        assert list(m.keys()) == ['xyz']
+        # Duplicate sections and section-less input are rejected.
+        with pytest.raises(ValueError):
+            EntryPoint.parse_map(["[xyz]", "[xyz]"])
+        with pytest.raises(ValueError):
+            EntryPoint.parse_map(self.submap_str)
+
+
+class TestRequirements:
+    """Tests for Requirement parsing, equality, hashing, and containment."""
+
+    def testBasics(self):
+        r = Requirement.parse("Twisted>=1.2")
+        assert str(r) == "Twisted>=1.2"
+        assert repr(r) == "Requirement.parse('Twisted>=1.2')"
+        assert r == Requirement("Twisted>=1.2")
+        # Project-name comparison is case-insensitive.
+        assert r == Requirement("twisTed>=1.2")
+        assert r != Requirement("Twisted>=2.0")
+        assert r != Requirement("Zope>=1.2")
+        assert r != Requirement("Zope>=3.0")
+        assert r != Requirement("Twisted[extras]>=1.2")
+
+    def testOrdering(self):
+        # Specifier order within a requirement is not significant.
+        r1 = Requirement("Twisted==1.2c1,>=1.2")
+        r2 = Requirement("Twisted>=1.2,==1.2c1")
+        assert r1 == r2
+        assert str(r1) == str(r2)
+        assert str(r2) == "Twisted==1.2c1,>=1.2"
+
+    def testBasicContains(self):
+        # `in` accepts parsed versions, version strings, and distributions.
+        r = Requirement("Twisted>=1.2")
+        foo_dist = Distribution.from_filename("FooPkg-1.3_1.egg")
+        twist11 = Distribution.from_filename("Twisted-1.1.egg")
+        twist12 = Distribution.from_filename("Twisted-1.2.egg")
+        assert parse_version('1.2') in r
+        assert parse_version('1.1') not in r
+        assert '1.2' in r
+        assert '1.1' not in r
+        assert foo_dist not in r
+        assert twist11 not in r
+        assert twist12 in r
+
+    def testOptionsAndHashing(self):
+        # Extras compare case-insensitively and order-insensitively.
+        r1 = Requirement.parse("Twisted[foo,bar]>=1.2")
+        r2 = Requirement.parse("Twisted[bar,FOO]>=1.2")
+        assert r1 == r2
+        assert set(r1.extras) == set(("foo", "bar"))
+        assert set(r2.extras) == set(("foo", "bar"))
+        assert hash(r1) == hash(r2)
+        # Hash is derived from (key, specifier set, extras, marker).
+        assert (
+            hash(r1)
+            ==
+            hash((
+                "twisted",
+                packaging.specifiers.SpecifierSet(">=1.2"),
+                frozenset(["foo", "bar"]),
+                None
+            ))
+        )
+
+    def testVersionEquality(self):
+        r1 = Requirement.parse("foo==0.3a2")
+        r2 = Requirement.parse("foo!=0.3a4")
+        d = Distribution.from_filename
+
+        assert d("foo-0.3a4.egg") not in r1
+        assert d("foo-0.3a1.egg") not in r1
+        assert d("foo-0.3a4.egg") not in r2
+
+        assert d("foo-0.3a2.egg") in r1
+        assert d("foo-0.3a2.egg") in r2
+        assert d("foo-0.3a3.egg") in r2
+        assert d("foo-0.3a5.egg") in r2
+
+    def testSetuptoolsProjectName(self):
+        """
+        The setuptools project should implement the setuptools package.
+        """
+
+        assert (
+            Requirement.parse('setuptools').project_name == 'setuptools')
+        # setuptools 0.7 and higher means setuptools.
+        assert (
+            Requirement.parse('setuptools == 0.7').project_name
+            == 'setuptools'
+        )
+        assert (
+            Requirement.parse('setuptools == 0.7a1').project_name
+            == 'setuptools'
+        )
+        assert (
+            Requirement.parse('setuptools >= 0.7').project_name
+            == 'setuptools'
+        )
+
+
+class TestParsing:
+    """
+    Tests for the low-level parsing helpers: yield_lines, split_sections,
+    safe_name/safe_version, requirement parsing, and version comparison.
+    """
+
+    def testEmptyParse(self):
+        assert list(parse_requirements('')) == []
+
+    def testYielding(self):
+        # yield_lines flattens nested iterables and drops blank lines.
+        for inp, out in [
+            ([], []), ('x', ['x']), ([[]], []), (' x\n y', ['x', 'y']),
+            (['x\n\n', 'y'], ['x', 'y']),
+        ]:
+            assert list(pkg_resources.yield_lines(inp)) == out
+
+    def testSplitting(self):
+        sample = """
+                    x
+                    [Y]
+                    z
+
+                    a
+                    [b ]
+                    # foo
+                    c
+                    [ d]
+                    [q]
+                    v
+                    """
+        # Section names are stripped; comments and blanks are dropped;
+        # content before the first header lands under section None.
+        assert (
+            list(pkg_resources.split_sections(sample))
+            ==
+            [
+                (None, ["x"]),
+                ("Y", ["z", "a"]),
+                ("b", ["c"]),
+                ("d", []),
+                ("q", ["v"]),
+            ]
+        )
+        with pytest.raises(ValueError):
+            list(pkg_resources.split_sections("[foo"))
+
+    def testSafeName(self):
+        # Runs of non-alphanumeric characters collapse to a single dash.
+        assert safe_name("adns-python") == "adns-python"
+        assert safe_name("WSGI Utils") == "WSGI-Utils"
+        assert safe_name("WSGI  Utils") == "WSGI-Utils"
+        assert safe_name("Money$$$Maker") == "Money-Maker"
+        assert safe_name("peak.web") != "peak-web"
+
+    def testSafeVersion(self):
+        assert safe_version("1.2-1") == "1.2.post1"
+        assert safe_version("1.2 alpha") == "1.2.alpha"
+        assert safe_version("2.3.4 20050521") == "2.3.4.20050521"
+        assert safe_version("Money$$$Maker") == "Money-Maker"
+        assert safe_version("peak.web") == "peak.web"
+
+    def testSimpleRequirements(self):
+        assert (
+            list(parse_requirements('Twis-Ted>=1.2-1'))
+            ==
+            [Requirement('Twis-Ted>=1.2-1')]
+        )
+        # Line continuations and comments are handled during parsing.
+        assert (
+            list(parse_requirements('Twisted >=1.2, \\ # more\n<2.0'))
+            ==
+            [Requirement('Twisted>=1.2,<2.0')]
+        )
+        assert (
+            Requirement.parse("FooBar==1.99a3")
+            ==
+            Requirement("FooBar==1.99a3")
+        )
+        with pytest.raises(ValueError):
+            Requirement.parse(">=2.3")
+        with pytest.raises(ValueError):
+            Requirement.parse("x\\")
+        with pytest.raises(ValueError):
+            Requirement.parse("x==2 q")
+        with pytest.raises(ValueError):
+            Requirement.parse("X==1\nY==2")
+        with pytest.raises(ValueError):
+            Requirement.parse("#")
+
+    def test_requirements_with_markers(self):
+        # Markers participate in requirement equality.
+        assert (
+            Requirement.parse("foobar;os_name=='a'")
+            ==
+            Requirement.parse("foobar;os_name=='a'")
+        )
+        assert (
+            Requirement.parse("name==1.1;python_version=='2.7'")
+            !=
+            Requirement.parse("name==1.1;python_version=='3.3'")
+        )
+        assert (
+            Requirement.parse("name==1.0;python_version=='2.7'")
+            !=
+            Requirement.parse("name==1.2;python_version=='2.7'")
+        )
+        assert (
+            Requirement.parse("name[foo]==1.0;python_version=='3.3'")
+            !=
+            Requirement.parse("name[foo,bar]==1.0;python_version=='3.3'")
+        )
+
+    def test_local_version(self):
+        # PEP 440 local version label must parse without error.
+        req, = parse_requirements('foo==1.0.org1')
+
+    def test_spaces_between_multiple_versions(self):
+        req, = parse_requirements('foo>=1.0, <3')
+        req, = parse_requirements('foo >= 1.0, < 3')
+
+    @pytest.mark.parametrize(
+        ['lower', 'upper'],
+        [
+            ('1.2-rc1', '1.2rc1'),
+            ('0.4', '0.4.0'),
+            ('0.4.0.0', '0.4.0'),
+            ('0.4.0-0', '0.4-0'),
+            ('0post1', '0.0post1'),
+            ('0pre1', '0.0c1'),
+            ('0.0.0preview1', '0c1'),
+            ('0.0c1', '0-rc1'),
+            ('1.2a1', '1.2.a.1'),
+            ('1.2.a', '1.2a'),
+        ],
+    )
+    def testVersionEquality(self, lower, upper):
+        # Each pair is spelled differently but normalizes to equal versions.
+        assert parse_version(lower) == parse_version(upper)
+
+    # Debian-style versions listed in descending order; consecutive pairs
+    # (reversed below) feed the ordering test.
+    torture = """
+        0.80.1-3 0.80.1-2 0.80.1-1 0.79.9999+0.80.0pre4-1
+        0.79.9999+0.80.0pre2-3 0.79.9999+0.80.0pre2-2
+        0.77.2-1 0.77.1-1 0.77.0-1
+        """
+
+    @pytest.mark.parametrize(
+        ['lower', 'upper'],
+        [
+            ('2.1', '2.1.1'),
+            ('2a1', '2b0'),
+            ('2a1', '2.1'),
+            ('2.3a1', '2.3'),
+            ('2.1-1', '2.1-2'),
+            ('2.1-1', '2.1.1'),
+            ('2.1', '2.1post4'),
+            ('2.1a0-20040501', '2.1'),
+            ('1.1', '02.1'),
+            ('3.2', '3.2.post0'),
+            ('3.2post1', '3.2post2'),
+            ('0.4', '4.0'),
+            ('0.0.4', '0.4.0'),
+            ('0post1', '0.4post1'),
+            ('2.1.0-rc1', '2.1.0'),
+            ('2.1dev', '2.1a0'),
+        ] + list(pairwise(reversed(torture.split()))),
+    )
+    def testVersionOrdering(self, lower, upper):
+        assert parse_version(lower) < parse_version(upper)
+
+    def testVersionHashable(self):
+        """
+        Ensure that our versions stay hashable even though we've subclassed
+        them and added some shim code to them.
+        """
+        assert (
+            hash(parse_version("1.0"))
+            ==
+            hash(parse_version("1.0"))
+        )
+
+
+class TestNamespaces:
+    """
+    Tests for pkg_resources-style namespace packages spread across
+    multiple sys.path entries.
+    """
+
+    # __init__.py body that declares a pkg_resources namespace package.
+    ns_str = "__import__('pkg_resources').declare_namespace(__name__)\n"
+
+    @pytest.yield_fixture
+    def symlinked_tmpdir(self, tmpdir):
+        """
+        Where available, return the tempdir as a symlink,
+        which as revealed in #231 is more fragile than
+        a natural tempdir.
+        """
+        if not hasattr(os, 'symlink'):
+            yield str(tmpdir)
+            return
+
+        link_name = str(tmpdir) + '-linked'
+        os.symlink(str(tmpdir), link_name)
+        try:
+            yield type(tmpdir)(link_name)
+        finally:
+            os.unlink(link_name)
+
+    @pytest.yield_fixture(autouse=True)
+    def patched_path(self, tmpdir):
+        """
+        Patch sys.path to include the 'site-pkgs' dir. Also
+        restore pkg_resources._namespace_packages to its
+        former state.
+        """
+        saved_ns_pkgs = pkg_resources._namespace_packages.copy()
+        saved_sys_path = sys.path[:]
+        site_pkgs = tmpdir.mkdir('site-pkgs')
+        sys.path.append(str(site_pkgs))
+        try:
+            yield
+        finally:
+            pkg_resources._namespace_packages = saved_ns_pkgs
+            sys.path = saved_sys_path
+
+    # Known-failing on Windows; see setuptools issue #591.
+    issue591 = pytest.mark.xfail(platform.system() == 'Windows', reason="#591")
+
+    @issue591
+    def test_two_levels_deep(self, symlinked_tmpdir):
+        """
+        Test nested namespace packages
+        Create namespace packages in the following tree :
+            site-packages-1/pkg1/pkg2
+            site-packages-2/pkg1/pkg2
+        Check both are in the _namespace_packages dict and that their __path__
+        is correct
+        """
+        real_tmpdir = symlinked_tmpdir.realpath()
+        tmpdir = symlinked_tmpdir
+        sys.path.append(str(tmpdir / 'site-pkgs2'))
+        site_dirs = tmpdir / 'site-pkgs', tmpdir / 'site-pkgs2'
+        for site in site_dirs:
+            pkg1 = site / 'pkg1'
+            pkg2 = pkg1 / 'pkg2'
+            pkg2.ensure_dir()
+            (pkg1 / '__init__.py').write_text(self.ns_str, encoding='utf-8')
+            (pkg2 / '__init__.py').write_text(self.ns_str, encoding='utf-8')
+        import pkg1
+        assert "pkg1" in pkg_resources._namespace_packages
+        # attempt to import pkg2 from site-pkgs2
+        import pkg1.pkg2
+        # check the _namespace_packages dict
+        assert "pkg1.pkg2" in pkg_resources._namespace_packages
+        assert pkg_resources._namespace_packages["pkg1"] == ["pkg1.pkg2"]
+        # check the __path__ attribute contains both paths
+        expected = [
+            str(real_tmpdir / "site-pkgs" / "pkg1" / "pkg2"),
+            str(real_tmpdir / "site-pkgs2" / "pkg1" / "pkg2"),
+        ]
+        assert pkg1.pkg2.__path__ == expected
+
+    @issue591
+    def test_path_order(self, symlinked_tmpdir):
+        """
+        Test that if multiple versions of the same namespace package subpackage
+        are on different sys.path entries, that only the one earliest on
+        sys.path is imported, and that the namespace package's __path__ is in
+        the correct order.
+
+        Regression test for https://github.com/pypa/setuptools/issues/207
+        """
+
+        tmpdir = symlinked_tmpdir
+        site_dirs = (
+            tmpdir / "site-pkgs",
+            tmpdir / "site-pkgs2",
+            tmpdir / "site-pkgs3",
+        )
+
+        vers_str = "__version__ = %r"
+
+        for number, site in enumerate(site_dirs, 1):
+            # site-pkgs (number 1) is already on sys.path via patched_path.
+            if number > 1:
+                sys.path.append(str(site))
+            nspkg = site / 'nspkg'
+            subpkg = nspkg / 'subpkg'
+            subpkg.ensure_dir()
+            (nspkg / '__init__.py').write_text(self.ns_str, encoding='utf-8')
+            (subpkg / '__init__.py').write_text(
+                vers_str % number, encoding='utf-8')
+
+        import nspkg.subpkg
+        import nspkg
+        expected = [
+            str(site.realpath() / 'nspkg')
+            for site in site_dirs
+        ]
+        assert nspkg.__path__ == expected
+        # Earliest sys.path entry wins: version 1, not 2 or 3.
+        assert nspkg.subpkg.__version__ == 1
diff --git a/pkg_resources/tests/test_working_set.py b/pkg_resources/tests/test_working_set.py
new file mode 100644
index 0000000..42ddcc8
--- /dev/null
+++ b/pkg_resources/tests/test_working_set.py
@@ -0,0 +1,482 @@
+import inspect
+import re
+import textwrap
+import functools
+
+import pytest
+
+import pkg_resources
+
+from .test_resources import Metadata
+
+
+def strip_comments(s):
+    return '\n'.join(
+        l for l in s.split('\n')
+        if l.strip() and not l.strip().startswith('#')
+    )
+
+
+def parse_distributions(s):
+    '''
+    Parse a series of distribution specs of the form:
+    {project_name}-{version}
+       [optional, indented requirements specification]
+
+    Example:
+
+        foo-0.2
+        bar-1.0
+          foo>=3.0
+          [feature]
+          baz
+
+    yield 2 distributions:
+        - project_name=foo, version=0.2
+        - project_name=bar, version=1.0,
+          requires=['foo>=3.0', 'baz; extra=="feature"']
+    '''
+    s = s.strip()
+    for spec in re.split(r'\n(?=[^\s])', s):
+        if not spec:
+            continue
+        fields = spec.split('\n', 1)
+        assert 1 <= len(fields) <= 2
+        name, version = fields.pop(0).split('-')
+        if fields:
+            requires = textwrap.dedent(fields.pop(0))
+            metadata = Metadata(('requires.txt', requires))
+        else:
+            metadata = None
+        dist = pkg_resources.Distribution(project_name=name,
+                                          version=version,
+                                          metadata=metadata)
+        yield dist
+
+
+class FakeInstaller(object):
+
+    def __init__(self, installable_dists):
+        self._installable_dists = installable_dists
+
+    def __call__(self, req):
+        return next(iter(filter(lambda dist: dist in req,
+                                self._installable_dists)), None)
+
+
+def parametrize_test_working_set_resolve(*test_list):
+    idlist = []
+    argvalues = []
+    for test in test_list:
+        (
+            name,
+            installed_dists,
+            installable_dists,
+            requirements,
+            expected1, expected2
+        ) = [
+            strip_comments(s.lstrip()) for s in
+            textwrap.dedent(test).lstrip().split('\n\n', 5)
+        ]
+        installed_dists = list(parse_distributions(installed_dists))
+        installable_dists = list(parse_distributions(installable_dists))
+        requirements = list(pkg_resources.parse_requirements(requirements))
+        for id_, replace_conflicting, expected in (
+            (name, False, expected1),
+            (name + '_replace_conflicting', True, expected2),
+        ):
+            idlist.append(id_)
+            expected = strip_comments(expected.strip())
+            if re.match(r'\w+$', expected):
+                expected = getattr(pkg_resources, expected)
+                assert issubclass(expected, Exception)
+            else:
+                expected = list(parse_distributions(expected))
+            argvalues.append(pytest.param(installed_dists, installable_dists,
+                                          requirements, replace_conflicting,
+                                          expected))
+    return pytest.mark.parametrize('installed_dists,installable_dists,'
+                                   'requirements,replace_conflicting,'
+                                   'resolved_dists_or_exception',
+                                   argvalues, ids=idlist)
+
+
+@parametrize_test_working_set_resolve(
+    '''
+    # id
+    noop
+
+    # installed
+
+    # installable
+
+    # wanted
+
+    # resolved
+
+    # resolved [replace conflicting]
+    ''',
+
+    '''
+    # id
+    already_installed
+
+    # installed
+    foo-3.0
+
+    # installable
+
+    # wanted
+    foo>=2.1,!=3.1,<4
+
+    # resolved
+    foo-3.0
+
+    # resolved [replace conflicting]
+    foo-3.0
+    ''',
+
+    '''
+    # id
+    installable_not_installed
+
+    # installed
+
+    # installable
+    foo-3.0
+    foo-4.0
+
+    # wanted
+    foo>=2.1,!=3.1,<4
+
+    # resolved
+    foo-3.0
+
+    # resolved [replace conflicting]
+    foo-3.0
+    ''',
+
+    '''
+    # id
+    not_installable
+
+    # installed
+
+    # installable
+
+    # wanted
+    foo>=2.1,!=3.1,<4
+
+    # resolved
+    DistributionNotFound
+
+    # resolved [replace conflicting]
+    DistributionNotFound
+    ''',
+
+    '''
+    # id
+    no_matching_version
+
+    # installed
+
+    # installable
+    foo-3.1
+
+    # wanted
+    foo>=2.1,!=3.1,<4
+
+    # resolved
+    DistributionNotFound
+
+    # resolved [replace conflicting]
+    DistributionNotFound
+    ''',
+
+    '''
+    # id
+    installable_with_installed_conflict
+
+    # installed
+    foo-3.1
+
+    # installable
+    foo-3.5
+
+    # wanted
+    foo>=2.1,!=3.1,<4
+
+    # resolved
+    VersionConflict
+
+    # resolved [replace conflicting]
+    foo-3.5
+    ''',
+
+    '''
+    # id
+    not_installable_with_installed_conflict
+
+    # installed
+    foo-3.1
+
+    # installable
+
+    # wanted
+    foo>=2.1,!=3.1,<4
+
+    # resolved
+    VersionConflict
+
+    # resolved [replace conflicting]
+    DistributionNotFound
+    ''',
+
+    '''
+    # id
+    installed_with_installed_require
+
+    # installed
+    foo-3.9
+    baz-0.1
+        foo>=2.1,!=3.1,<4
+
+    # installable
+
+    # wanted
+    baz
+
+    # resolved
+    foo-3.9
+    baz-0.1
+
+    # resolved [replace conflicting]
+    foo-3.9
+    baz-0.1
+    ''',
+
+    '''
+    # id
+    installed_with_conflicting_installed_require
+
+    # installed
+    foo-5
+    baz-0.1
+        foo>=2.1,!=3.1,<4
+
+    # installable
+
+    # wanted
+    baz
+
+    # resolved
+    VersionConflict
+
+    # resolved [replace conflicting]
+    DistributionNotFound
+    ''',
+
+    '''
+    # id
+    installed_with_installable_conflicting_require
+
+    # installed
+    foo-5
+    baz-0.1
+        foo>=2.1,!=3.1,<4
+
+    # installable
+    foo-2.9
+
+    # wanted
+    baz
+
+    # resolved
+    VersionConflict
+
+    # resolved [replace conflicting]
+    baz-0.1
+    foo-2.9
+    ''',
+
+    '''
+    # id
+    installed_with_installable_require
+
+    # installed
+    baz-0.1
+        foo>=2.1,!=3.1,<4
+
+    # installable
+    foo-3.9
+
+    # wanted
+    baz
+
+    # resolved
+    foo-3.9
+    baz-0.1
+
+    # resolved [replace conflicting]
+    foo-3.9
+    baz-0.1
+    ''',
+
+    '''
+    # id
+    installable_with_installed_require
+
+    # installed
+    foo-3.9
+
+    # installable
+    baz-0.1
+        foo>=2.1,!=3.1,<4
+
+    # wanted
+    baz
+
+    # resolved
+    foo-3.9
+    baz-0.1
+
+    # resolved [replace conflicting]
+    foo-3.9
+    baz-0.1
+    ''',
+
+    '''
+    # id
+    installable_with_installable_require
+
+    # installed
+
+    # installable
+    foo-3.9
+    baz-0.1
+        foo>=2.1,!=3.1,<4
+
+    # wanted
+    baz
+
+    # resolved
+    foo-3.9
+    baz-0.1
+
+    # resolved [replace conflicting]
+    foo-3.9
+    baz-0.1
+    ''',
+
+    '''
+    # id
+    installable_with_conflicting_installable_require
+
+    # installed
+    foo-5
+
+    # installable
+    foo-2.9
+    baz-0.1
+        foo>=2.1,!=3.1,<4
+
+    # wanted
+    baz
+
+    # resolved
+    VersionConflict
+
+    # resolved [replace conflicting]
+    baz-0.1
+    foo-2.9
+    ''',
+
+    '''
+    # id
+    conflicting_installables
+
+    # installed
+
+    # installable
+    foo-2.9
+    foo-5.0
+
+    # wanted
+    foo>=2.1,!=3.1,<4
+    foo>=4
+
+    # resolved
+    VersionConflict
+
+    # resolved [replace conflicting]
+    VersionConflict
+    ''',
+
+    '''
+    # id
+    installables_with_conflicting_requires
+
+    # installed
+
+    # installable
+    foo-2.9
+        dep==1.0
+    baz-5.0
+        dep==2.0
+    dep-1.0
+    dep-2.0
+
+    # wanted
+    foo
+    baz
+
+    # resolved
+    VersionConflict
+
+    # resolved [replace conflicting]
+    VersionConflict
+    ''',
+
+    '''
+    # id
+    installables_with_conflicting_nested_requires
+
+    # installed
+
+    # installable
+    foo-2.9
+        dep1
+    dep1-1.0
+        subdep<1.0
+    baz-5.0
+        dep2
+    dep2-1.0
+        subdep>1.0
+    subdep-0.9
+    subdep-1.1
+
+    # wanted
+    foo
+    baz
+
+    # resolved
+    VersionConflict
+
+    # resolved [replace conflicting]
+    VersionConflict
+    ''',
+)
+def test_working_set_resolve(installed_dists, installable_dists, requirements,
+                             replace_conflicting, resolved_dists_or_exception):
+    ws = pkg_resources.WorkingSet([])
+    list(map(ws.add, installed_dists))
+    resolve_call = functools.partial(
+        ws.resolve,
+        requirements, installer=FakeInstaller(installable_dists),
+        replace_conflicting=replace_conflicting,
+    )
+    if inspect.isclass(resolved_dists_or_exception):
+        with pytest.raises(resolved_dists_or_exception):
+            resolve_call()
+    else:
+        assert sorted(resolve_call()) == sorted(resolved_dists_or_exception)
diff --git a/pytest.ini b/pytest.ini
new file mode 100755
index 0000000..16fdc5a
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,6 @@
+[pytest]
+addopts=--doctest-modules --ignore release.py --ignore setuptools/lib2to3_ex.py --ignore tests/manual_test.py --ignore tests/test_pypi.py --ignore tests/shlib_test --doctest-glob=pkg_resources/api_tests.txt --ignore scripts/upload-old-releases-as-zip.py --ignore pavement.py --ignore setuptools/tests/mod_with_constant.py -rsxX
+norecursedirs=dist build *.egg setuptools/extern pkg_resources/extern .*
+flake8-ignore =
+    setuptools/site-patch.py F821
+    setuptools/py*compat.py F811
diff --git a/setup.cfg b/setup.cfg
new file mode 100755
index 0000000..1184020
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,29 @@
+[bumpversion]
+current_version = 39.1.0
+commit = True
+tag = True
+
+[egg_info]
+tag_build = 
+tag_date = 0
+
+[aliases]
+clean_egg_info = egg_info -Db ''
+release = clean_egg_info sdist bdist_wheel
+source = register sdist binary
+binary = bdist_egg upload --show-response
+
+[upload]
+repository = https://upload.pypi.org/legacy/
+
+[sdist]
+formats = zip
+
+[bdist_wheel]
+universal = 1
+
+[metadata]
+license_file = LICENSE
+
+[bumpversion:file:setup.py]
+
diff --git a/setup.py b/setup.py
new file mode 100755
index 0000000..b08552d
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+"""
+Distutils setup file, used to install or test 'setuptools'
+"""
+
+import io
+import os
+import sys
+import textwrap
+
+import setuptools
+
+here = os.path.dirname(__file__)
+
+
+def require_metadata():
+    "Prevent improper installs without necessary metadata. See #659"
+    egg_info_dir = os.path.join(here, 'setuptools.egg-info')
+    if not os.path.exists(egg_info_dir):
+        msg = (
+            "Cannot build setuptools without metadata. "
+            "Run `bootstrap.py`."
+        )
+        raise RuntimeError(msg)
+
+
+def read_commands():
+    command_ns = {}
+    cmd_module_path = 'setuptools/command/__init__.py'
+    init_path = os.path.join(here, cmd_module_path)
+    with open(init_path) as init_file:
+        exec(init_file.read(), command_ns)
+    return command_ns['__all__']
+
+
+def _gen_console_scripts():
+    yield "easy_install = setuptools.command.easy_install:main"
+
+    # Gentoo distributions manage the python-version-specific scripts
+    # themselves, so those platforms define an environment variable to
+    # suppress the creation of the version-specific scripts.
+    var_names = (
+        'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
+        'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
+    )
+    if any(os.environ.get(var) not in (None, "", "0") for var in var_names):
+        return
+    tmpl = "easy_install-{shortver} = setuptools.command.easy_install:main"
+    yield tmpl.format(shortver=sys.version[:3])
+
+
+readme_path = os.path.join(here, 'README.rst')
+with io.open(readme_path, encoding='utf-8') as readme_file:
+    long_description = readme_file.read()
+
+package_data = dict(
+    setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'],
+)
+
+force_windows_specific_files = (
+    os.environ.get("SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES", "1").lower()
+    not in ("", "0", "false", "no")
+)
+
+include_windows_files = (
+    sys.platform == 'win32' or
+    os.name == 'java' and os._name == 'nt' or
+    force_windows_specific_files
+)
+
+if include_windows_files:
+    package_data.setdefault('setuptools', []).extend(['*.exe'])
+    package_data.setdefault('setuptools.command', []).extend(['*.xml'])
+
+needs_wheel = set(['release', 'bdist_wheel']).intersection(sys.argv)
+wheel = ['wheel'] if needs_wheel else []
+
+
+def pypi_link(pkg_filename):
+    """
+    Given the filename, including md5 fragment, construct the
+    dependency link for PyPI.
+    """
+    root = 'https://files.pythonhosted.org/packages/source'
+    name, sep, rest = pkg_filename.partition('-')
+    parts = root, name[0], name, pkg_filename
+    return '/'.join(parts)
+
+
+setup_params = dict(
+    name="setuptools",
+    version="39.1.0",
+    description=(
+        "Easily download, build, install, upgrade, and uninstall "
+        "Python packages"
+    ),
+    author="Python Packaging Authority",
+    author_email="distutils-sig@python.org",
+    long_description=long_description,
+    long_description_content_type='text/x-rst; charset=UTF-8',
+    keywords="CPAN PyPI distutils eggs package management",
+    url="https://github.com/pypa/setuptools",
+    project_urls={
+        "Documentation": "https://setuptools.readthedocs.io/",
+    },
+    src_root=None,
+    packages=setuptools.find_packages(exclude=['*.tests']),
+    package_data=package_data,
+    py_modules=['easy_install'],
+    zip_safe=True,
+    entry_points={
+        "distutils.commands": [
+            "%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s" % locals()
+            for cmd in read_commands()
+        ],
+        "distutils.setup_keywords": [
+            "eager_resources        = setuptools.dist:assert_string_list",
+            "namespace_packages     = setuptools.dist:check_nsp",
+            "extras_require         = setuptools.dist:check_extras",
+            "install_requires       = setuptools.dist:check_requirements",
+            "tests_require          = setuptools.dist:check_requirements",
+            "setup_requires         = setuptools.dist:check_requirements",
+            "python_requires        = setuptools.dist:check_specifier",
+            "entry_points           = setuptools.dist:check_entry_points",
+            "test_suite             = setuptools.dist:check_test_suite",
+            "zip_safe               = setuptools.dist:assert_bool",
+            "package_data           = setuptools.dist:check_package_data",
+            "exclude_package_data   = setuptools.dist:check_package_data",
+            "include_package_data   = setuptools.dist:assert_bool",
+            "packages               = setuptools.dist:check_packages",
+            "dependency_links       = setuptools.dist:assert_string_list",
+            "test_loader            = setuptools.dist:check_importable",
+            "test_runner            = setuptools.dist:check_importable",
+            "use_2to3               = setuptools.dist:assert_bool",
+            "convert_2to3_doctests  = setuptools.dist:assert_string_list",
+            "use_2to3_fixers        = setuptools.dist:assert_string_list",
+            "use_2to3_exclude_fixers = setuptools.dist:assert_string_list",
+        ],
+        "egg_info.writers": [
+            "PKG-INFO = setuptools.command.egg_info:write_pkg_info",
+            "requires.txt = setuptools.command.egg_info:write_requirements",
+            "entry_points.txt = setuptools.command.egg_info:write_entries",
+            "eager_resources.txt = setuptools.command.egg_info:overwrite_arg",
+            (
+                "namespace_packages.txt = "
+                "setuptools.command.egg_info:overwrite_arg"
+            ),
+            "top_level.txt = setuptools.command.egg_info:write_toplevel_names",
+            "depends.txt = setuptools.command.egg_info:warn_depends_obsolete",
+            "dependency_links.txt = setuptools.command.egg_info:overwrite_arg",
+        ],
+        "console_scripts": list(_gen_console_scripts()),
+        "setuptools.installation":
+            ['eggsecutable = setuptools.command.easy_install:bootstrap'],
+    },
+    classifiers=textwrap.dedent("""
+        Development Status :: 5 - Production/Stable
+        Intended Audience :: Developers
+        License :: OSI Approved :: MIT License
+        Operating System :: OS Independent
+        Programming Language :: Python :: 2
+        Programming Language :: Python :: 2.7
+        Programming Language :: Python :: 3
+        Programming Language :: Python :: 3.3
+        Programming Language :: Python :: 3.4
+        Programming Language :: Python :: 3.5
+        Programming Language :: Python :: 3.6
+        Topic :: Software Development :: Libraries :: Python Modules
+        Topic :: System :: Archiving :: Packaging
+        Topic :: System :: Systems Administration
+        Topic :: Utilities
+        """).strip().splitlines(),
+    python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*',
+    extras_require={
+        "ssl:sys_platform=='win32'": "wincertstore==0.2",
+        "certs": "certifi==2016.9.26",
+    },
+    dependency_links=[
+        pypi_link(
+            'certifi-2016.9.26.tar.gz#md5=baa81e951a29958563689d868ef1064d',
+        ),
+        pypi_link(
+            'wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2',
+        ),
+    ],
+    scripts=[],
+    setup_requires=[
+    ] + wheel,
+)
+
+if __name__ == '__main__':
+    # allow setup.py to run from another directory
+    here and os.chdir(here)
+    require_metadata()
+    dist = setuptools.setup(**setup_params)
diff --git a/setuptools.egg-info/PKG-INFO b/setuptools.egg-info/PKG-INFO
new file mode 100644
index 0000000..51e6da6
--- /dev/null
+++ b/setuptools.egg-info/PKG-INFO
@@ -0,0 +1,65 @@
+Metadata-Version: 2.1
+Name: setuptools
+Version: 39.1.0
+Summary: Easily download, build, install, upgrade, and uninstall Python packages
+Home-page: https://github.com/pypa/setuptools
+Author: Python Packaging Authority
+Author-email: distutils-sig@python.org
+License: UNKNOWN
+Project-URL: Documentation, https://setuptools.readthedocs.io/
+Description: .. image:: https://img.shields.io/pypi/v/setuptools.svg
+           :target: https://pypi.org/project/setuptools
+        
+        .. image:: https://readthedocs.org/projects/setuptools/badge/?version=latest
+            :target: https://setuptools.readthedocs.io
+        
+        .. image:: https://img.shields.io/travis/pypa/setuptools/master.svg?label=Linux%20build%20%40%20Travis%20CI
+           :target: https://travis-ci.org/pypa/setuptools
+        
+        .. image:: https://img.shields.io/appveyor/ci/jaraco/setuptools/master.svg?label=Windows%20build%20%40%20Appveyor
+           :target: https://ci.appveyor.com/project/jaraco/setuptools/branch/master
+        
+        .. image:: https://img.shields.io/pypi/pyversions/setuptools.svg
+        
+        See the `Installation Instructions
+        <https://packaging.python.org/installing/>`_ in the Python Packaging
+        User's Guide for instructions on installing, upgrading, and uninstalling
+        Setuptools.
+        
+        The project is `maintained at GitHub <https://github.com/pypa/setuptools>`_.
+        
+        Questions and comments should be directed to the `distutils-sig
+        mailing list <http://mail.python.org/pipermail/distutils-sig/>`_.
+        Bug reports and especially tested patches may be
+        submitted directly to the `bug tracker
+        <https://github.com/pypa/setuptools/issues>`_.
+        
+        
+        Code of Conduct
+        ---------------
+        
+        Everyone interacting in the setuptools project's codebases, issue trackers,
+        chat rooms, and mailing lists is expected to follow the
+        `PyPA Code of Conduct <https://www.pypa.io/en/latest/code-of-conduct/>`_.
+        
+Keywords: CPAN PyPI distutils eggs package management
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: System :: Archiving :: Packaging
+Classifier: Topic :: System :: Systems Administration
+Classifier: Topic :: Utilities
+Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*
+Description-Content-Type: text/x-rst; charset=UTF-8
+Provides-Extra: ssl
+Provides-Extra: certs
diff --git a/setuptools.egg-info/SOURCES.txt b/setuptools.egg-info/SOURCES.txt
new file mode 100644
index 0000000..93f588d
--- /dev/null
+++ b/setuptools.egg-info/SOURCES.txt
@@ -0,0 +1,192 @@
+CHANGES.rst
+LICENSE
+MANIFEST.in
+README.rst
+bootstrap.py
+conftest.py
+easy_install.py
+launcher.c
+msvc-build-launcher.cmd
+pavement.py
+pytest.ini
+setup.cfg
+setup.py
+tox.ini
+docs/Makefile
+docs/conf.py
+docs/developer-guide.txt
+docs/development.txt
+docs/easy_install.txt
+docs/formats.txt
+docs/history.txt
+docs/index.txt
+docs/pkg_resources.txt
+docs/python3.txt
+docs/releases.txt
+docs/requirements.txt
+docs/roadmap.txt
+docs/setuptools.txt
+docs/_templates/indexsidebar.html
+docs/_theme/nature/theme.conf
+docs/_theme/nature/static/nature.css_t
+docs/_theme/nature/static/pygments.css
+pkg_resources/__init__.py
+pkg_resources/api_tests.txt
+pkg_resources/py31compat.py
+pkg_resources/_vendor/__init__.py
+pkg_resources/_vendor/appdirs.py
+pkg_resources/_vendor/pyparsing.py
+pkg_resources/_vendor/six.py
+pkg_resources/_vendor/vendored.txt
+pkg_resources/_vendor/packaging/__about__.py
+pkg_resources/_vendor/packaging/__init__.py
+pkg_resources/_vendor/packaging/_compat.py
+pkg_resources/_vendor/packaging/_structures.py
+pkg_resources/_vendor/packaging/markers.py
+pkg_resources/_vendor/packaging/requirements.py
+pkg_resources/_vendor/packaging/specifiers.py
+pkg_resources/_vendor/packaging/utils.py
+pkg_resources/_vendor/packaging/version.py
+pkg_resources/extern/__init__.py
+pkg_resources/tests/__init__.py
+pkg_resources/tests/test_find_distributions.py
+pkg_resources/tests/test_markers.py
+pkg_resources/tests/test_pkg_resources.py
+pkg_resources/tests/test_resources.py
+pkg_resources/tests/test_working_set.py
+setuptools/__init__.py
+setuptools/archive_util.py
+setuptools/build_meta.py
+setuptools/cli-32.exe
+setuptools/cli-64.exe
+setuptools/cli.exe
+setuptools/config.py
+setuptools/dep_util.py
+setuptools/depends.py
+setuptools/dist.py
+setuptools/extension.py
+setuptools/glibc.py
+setuptools/glob.py
+setuptools/gui-32.exe
+setuptools/gui-64.exe
+setuptools/gui.exe
+setuptools/launch.py
+setuptools/lib2to3_ex.py
+setuptools/monkey.py
+setuptools/msvc.py
+setuptools/namespaces.py
+setuptools/package_index.py
+setuptools/pep425tags.py
+setuptools/py27compat.py
+setuptools/py31compat.py
+setuptools/py33compat.py
+setuptools/py36compat.py
+setuptools/sandbox.py
+setuptools/script (dev).tmpl
+setuptools/script.tmpl
+setuptools/site-patch.py
+setuptools/ssl_support.py
+setuptools/unicode_utils.py
+setuptools/version.py
+setuptools/wheel.py
+setuptools/windows_support.py
+setuptools.egg-info/PKG-INFO
+setuptools.egg-info/SOURCES.txt
+setuptools.egg-info/dependency_links.txt
+setuptools.egg-info/entry_points.txt
+setuptools.egg-info/requires.txt
+setuptools.egg-info/top_level.txt
+setuptools.egg-info/zip-safe
+setuptools/_vendor/__init__.py
+setuptools/_vendor/pyparsing.py
+setuptools/_vendor/six.py
+setuptools/_vendor/vendored.txt
+setuptools/_vendor/__pycache__/__init__.cpython-36.pyc
+setuptools/_vendor/__pycache__/six.cpython-36.pyc
+setuptools/_vendor/packaging/__about__.py
+setuptools/_vendor/packaging/__init__.py
+setuptools/_vendor/packaging/_compat.py
+setuptools/_vendor/packaging/_structures.py
+setuptools/_vendor/packaging/markers.py
+setuptools/_vendor/packaging/requirements.py
+setuptools/_vendor/packaging/specifiers.py
+setuptools/_vendor/packaging/utils.py
+setuptools/_vendor/packaging/version.py
+setuptools/_vendor/packaging/__pycache__/__about__.cpython-36.pyc
+setuptools/_vendor/packaging/__pycache__/__init__.cpython-36.pyc
+setuptools/_vendor/packaging/__pycache__/_compat.cpython-36.pyc
+setuptools/_vendor/packaging/__pycache__/_structures.cpython-36.pyc
+setuptools/_vendor/packaging/__pycache__/specifiers.cpython-36.pyc
+setuptools/_vendor/packaging/__pycache__/version.cpython-36.pyc
+setuptools/command/__init__.py
+setuptools/command/alias.py
+setuptools/command/bdist_egg.py
+setuptools/command/bdist_rpm.py
+setuptools/command/bdist_wininst.py
+setuptools/command/build_clib.py
+setuptools/command/build_ext.py
+setuptools/command/build_py.py
+setuptools/command/develop.py
+setuptools/command/dist_info.py
+setuptools/command/easy_install.py
+setuptools/command/egg_info.py
+setuptools/command/install.py
+setuptools/command/install_egg_info.py
+setuptools/command/install_lib.py
+setuptools/command/install_scripts.py
+setuptools/command/launcher manifest.xml
+setuptools/command/py36compat.py
+setuptools/command/register.py
+setuptools/command/rotate.py
+setuptools/command/saveopts.py
+setuptools/command/sdist.py
+setuptools/command/setopt.py
+setuptools/command/test.py
+setuptools/command/upload.py
+setuptools/command/upload_docs.py
+setuptools/extern/__init__.py
+setuptools/tests/__init__.py
+setuptools/tests/contexts.py
+setuptools/tests/environment.py
+setuptools/tests/files.py
+setuptools/tests/fixtures.py
+setuptools/tests/mod_with_constant.py
+setuptools/tests/namespaces.py
+setuptools/tests/script-with-bom.py
+setuptools/tests/server.py
+setuptools/tests/test_archive_util.py
+setuptools/tests/test_bdist_egg.py
+setuptools/tests/test_build_clib.py
+setuptools/tests/test_build_ext.py
+setuptools/tests/test_build_meta.py
+setuptools/tests/test_build_py.py
+setuptools/tests/test_config.py
+setuptools/tests/test_dep_util.py
+setuptools/tests/test_depends.py
+setuptools/tests/test_develop.py
+setuptools/tests/test_dist.py
+setuptools/tests/test_dist_info.py
+setuptools/tests/test_easy_install.py
+setuptools/tests/test_egg_info.py
+setuptools/tests/test_find_packages.py
+setuptools/tests/test_install_scripts.py
+setuptools/tests/test_integration.py
+setuptools/tests/test_manifest.py
+setuptools/tests/test_msvc.py
+setuptools/tests/test_namespaces.py
+setuptools/tests/test_packageindex.py
+setuptools/tests/test_sandbox.py
+setuptools/tests/test_sdist.py
+setuptools/tests/test_setuptools.py
+setuptools/tests/test_test.py
+setuptools/tests/test_unicode_utils.py
+setuptools/tests/test_upload_docs.py
+setuptools/tests/test_virtualenv.py
+setuptools/tests/test_wheel.py
+setuptools/tests/test_windows_wrappers.py
+setuptools/tests/text.py
+setuptools/tests/textwrap.py
+setuptools/tests/indexes/test_links_priority/external.html
+setuptools/tests/indexes/test_links_priority/simple/foobar/index.html
+tests/manual_test.py
+tests/test_pypi.py
\ No newline at end of file
diff --git a/setuptools.egg-info/dependency_links.txt b/setuptools.egg-info/dependency_links.txt
new file mode 100644
index 0000000..e87d021
--- /dev/null
+++ b/setuptools.egg-info/dependency_links.txt
@@ -0,0 +1,2 @@
+https://files.pythonhosted.org/packages/source/c/certifi/certifi-2016.9.26.tar.gz#md5=baa81e951a29958563689d868ef1064d
+https://files.pythonhosted.org/packages/source/w/wincertstore/wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2
diff --git a/setuptools.egg-info/entry_points.txt b/setuptools.egg-info/entry_points.txt
new file mode 100644
index 0000000..4159fd0
--- /dev/null
+++ b/setuptools.egg-info/entry_points.txt
@@ -0,0 +1,65 @@
+[console_scripts]
+easy_install = setuptools.command.easy_install:main
+easy_install-3.6 = setuptools.command.easy_install:main
+
+[distutils.commands]
+alias = setuptools.command.alias:alias
+bdist_egg = setuptools.command.bdist_egg:bdist_egg
+bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm
+bdist_wininst = setuptools.command.bdist_wininst:bdist_wininst
+build_clib = setuptools.command.build_clib:build_clib
+build_ext = setuptools.command.build_ext:build_ext
+build_py = setuptools.command.build_py:build_py
+develop = setuptools.command.develop:develop
+dist_info = setuptools.command.dist_info:dist_info
+easy_install = setuptools.command.easy_install:easy_install
+egg_info = setuptools.command.egg_info:egg_info
+install = setuptools.command.install:install
+install_egg_info = setuptools.command.install_egg_info:install_egg_info
+install_lib = setuptools.command.install_lib:install_lib
+install_scripts = setuptools.command.install_scripts:install_scripts
+register = setuptools.command.register:register
+rotate = setuptools.command.rotate:rotate
+saveopts = setuptools.command.saveopts:saveopts
+sdist = setuptools.command.sdist:sdist
+setopt = setuptools.command.setopt:setopt
+test = setuptools.command.test:test
+upload = setuptools.command.upload:upload
+upload_docs = setuptools.command.upload_docs:upload_docs
+
+[distutils.setup_keywords]
+convert_2to3_doctests = setuptools.dist:assert_string_list
+dependency_links = setuptools.dist:assert_string_list
+eager_resources = setuptools.dist:assert_string_list
+entry_points = setuptools.dist:check_entry_points
+exclude_package_data = setuptools.dist:check_package_data
+extras_require = setuptools.dist:check_extras
+include_package_data = setuptools.dist:assert_bool
+install_requires = setuptools.dist:check_requirements
+namespace_packages = setuptools.dist:check_nsp
+package_data = setuptools.dist:check_package_data
+packages = setuptools.dist:check_packages
+python_requires = setuptools.dist:check_specifier
+setup_requires = setuptools.dist:check_requirements
+test_loader = setuptools.dist:check_importable
+test_runner = setuptools.dist:check_importable
+test_suite = setuptools.dist:check_test_suite
+tests_require = setuptools.dist:check_requirements
+use_2to3 = setuptools.dist:assert_bool
+use_2to3_exclude_fixers = setuptools.dist:assert_string_list
+use_2to3_fixers = setuptools.dist:assert_string_list
+zip_safe = setuptools.dist:assert_bool
+
+[egg_info.writers]
+PKG-INFO = setuptools.command.egg_info:write_pkg_info
+dependency_links.txt = setuptools.command.egg_info:overwrite_arg
+depends.txt = setuptools.command.egg_info:warn_depends_obsolete
+eager_resources.txt = setuptools.command.egg_info:overwrite_arg
+entry_points.txt = setuptools.command.egg_info:write_entries
+namespace_packages.txt = setuptools.command.egg_info:overwrite_arg
+requires.txt = setuptools.command.egg_info:write_requirements
+top_level.txt = setuptools.command.egg_info:write_toplevel_names
+
+[setuptools.installation]
+eggsecutable = setuptools.command.easy_install:bootstrap
+
diff --git a/setuptools.egg-info/requires.txt b/setuptools.egg-info/requires.txt
new file mode 100644
index 0000000..c1529e4
--- /dev/null
+++ b/setuptools.egg-info/requires.txt
@@ -0,0 +1,6 @@
+
+[certs]
+certifi==2016.9.26
+
+[ssl:sys_platform=='win32']
+wincertstore==0.2
diff --git a/setuptools.egg-info/top_level.txt b/setuptools.egg-info/top_level.txt
new file mode 100644
index 0000000..4577c6a
--- /dev/null
+++ b/setuptools.egg-info/top_level.txt
@@ -0,0 +1,3 @@
+easy_install
+pkg_resources
+setuptools
diff --git a/setuptools.egg-info/zip-safe b/setuptools.egg-info/zip-safe
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/setuptools.egg-info/zip-safe
@@ -0,0 +1 @@
+
diff --git a/setuptools/__init__.py b/setuptools/__init__.py
new file mode 100644
index 0000000..7da47fb
--- /dev/null
+++ b/setuptools/__init__.py
@@ -0,0 +1,180 @@
+"""Extensions to the 'distutils' for large or complex distributions"""
+
+import os
+import functools
+import distutils.core
+import distutils.filelist
+from distutils.util import convert_path
+from fnmatch import fnmatchcase
+
+from setuptools.extern.six.moves import filter, map
+
+import setuptools.version
+from setuptools.extension import Extension
+from setuptools.dist import Distribution, Feature
+from setuptools.depends import Require
+from . import monkey
+
+__all__ = [
+    'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
+    'find_packages',
+]
+
+__version__ = setuptools.version.__version__
+
+bootstrap_install_from = None
+
+# If we run 2to3 on .py files, should we also convert docstrings?
+# Default: yes; assume that we can detect doctests reliably
+run_2to3_on_doctests = True
+# Standard package names for fixer packages
+lib2to3_fixer_packages = ['lib2to3.fixes']
+
+
class PackageFinder(object):
    """
    Generate a list of all Python packages found within a directory
    """

    @classmethod
    def find(cls, where='.', exclude=(), include=('*',)):
        """Return a list all Python packages found within directory 'where'

        'where' is the root directory which will be searched for packages.  It
        should be supplied as a "cross-platform" (i.e. URL-style) path; it will
        be converted to the appropriate local path syntax.

        'exclude' is a sequence of package names to exclude; '*' can be used
        as a wildcard in the names, such that 'foo.*' will exclude all
        subpackages of 'foo' (but not 'foo' itself).

        'include' is a sequence of package names to include.  If it's
        specified, only the named packages will be included.  If it's not
        specified, all found packages will be included.  'include' can contain
        shell style wildcard patterns just like 'exclude'.
        """
        # 'ez_setup' and '__pycache__' trees are never real packages.
        excluded = cls._build_filter('ez_setup', '*__pycache__', *exclude)
        included = cls._build_filter(*include)
        return list(
            cls._find_packages_iter(convert_path(where), excluded, included)
        )

    @classmethod
    def _find_packages_iter(cls, where, exclude, include):
        """
        All the packages found in 'where' that pass the 'include' filter, but
        not the 'exclude' filter.
        """
        for root, dirs, files in os.walk(where, followlinks=True):
            # Take over the walk: copy the pending dirs and clear the live
            # list so only directories we re-append get descended into.
            pending, dirs[:] = dirs[:], []

            for name in pending:
                full_path = os.path.join(root, name)
                rel_path = os.path.relpath(full_path, where)
                package = rel_path.replace(os.path.sep, '.')

                # Directories with dots in the name, or without package
                # markers, terminate the search down this branch.
                if '.' in name or not cls._looks_like_package(full_path):
                    continue

                if include(package) and not exclude(package):
                    yield package

                # Even an excluded package may contain includable
                # subpackages, so keep walking beneath it.
                dirs.append(name)

    @staticmethod
    def _looks_like_package(path):
        """Does a directory look like a package?"""
        return os.path.isfile(os.path.join(path, '__init__.py'))

    @staticmethod
    def _build_filter(*patterns):
        """
        Given a list of patterns, return a callable that will be true only if
        the input matches at least one of the patterns.
        """
        def matcher(name):
            return any(fnmatchcase(name, pat=pat) for pat in patterns)
        return matcher
+
+
class PEP420PackageFinder(PackageFinder):
    """Package finder that accepts every directory as a package.

    PEP 420 namespace packages need no ``__init__.py``, so the marker-file
    check is dropped entirely.
    """

    @staticmethod
    def _looks_like_package(path):
        # Any directory qualifies under PEP 420.
        return True
+
+
+find_packages = PackageFinder.find
+
+
def _install_setup_requires(attrs):
    """Fetch the 'setup_requires' eggs named in *attrs* before setup() runs.

    Builds a minimal, throwaway distribution object from only the keys
    relevant to build-time dependency resolution, honors setup.cfg, and
    downloads any missing build eggs so they are importable while the real
    setup() call interprets the full 'attrs'.
    """
    # Note: do not use `setuptools.Distribution` directly, as
    # our PEP 517 backend patch `distutils.core.Distribution`.
    dist = distutils.core.Distribution(dict(
        (k, v) for k, v in attrs.items()
        if k in ('dependency_links', 'setup_requires')
    ))
    # Honor setup.cfg's options.
    dist.parse_config_files(ignore_option_errors=True)
    # NOTE(review): 'parse_config_files(ignore_option_errors=...)' and
    # 'fetch_build_eggs' only exist on setuptools' Distribution subclass —
    # assumes monkey.patch_all() has already replaced
    # distutils.core.Distribution; confirm import-time ordering.
    if dist.setup_requires:
        dist.fetch_build_eggs(dist.setup_requires)
+
+
def setup(**attrs):
    # Drop-in replacement for distutils.core.setup(): resolve any
    # 'setup_requires' build dependencies first so they are importable
    # while 'attrs' is interpreted, then delegate to the (monkey-patched)
    # distutils entry point.
    # Make sure we have any requirements needed to interpret 'attrs'.
    _install_setup_requires(attrs)
    return distutils.core.setup(**attrs)

# Present distutils' own documentation to help() callers.
setup.__doc__ = distutils.core.setup.__doc__
+
+
+_Command = monkey.get_unpatched(distutils.core.Command)
+
+
class Command(_Command):
    # Setuptools' command base class, extending the original (un-patched)
    # distutils Command retrieved via monkey.get_unpatched above.
    __doc__ = _Command.__doc__

    # When True, the command may consume the remaining command-line
    # arguments (distutils checks this attribute during parsing).
    command_consumes_arguments = False

    def __init__(self, dist, **kw):
        """
        Construct the command for dist, updating
        vars(self) with any keyword parameters.
        """
        _Command.__init__(self, dist)
        # Any keyword becomes an instance attribute, letting callers
        # preconfigure command options at construction time.
        vars(self).update(kw)

    def reinitialize_command(self, command, reinit_subcommands=0, **kw):
        # Same as distutils' version, but additionally applies keyword
        # overrides to the freshly reinitialized command object.
        cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
        vars(cmd).update(kw)
        return cmd
+
+
+def _find_all_simple(path):
+    """
+    Find all files under 'path'
+    """
+    results = (
+        os.path.join(base, file)
+        for base, dirs, files in os.walk(path, followlinks=True)
+        for file in files
+    )
+    return filter(os.path.isfile, results)
+
+
def findall(dir=os.curdir):
    """
    Find all files under 'dir' and return the list of full filenames.
    Unless dir is '.', return full filenames with dir prepended.
    """
    results = _find_all_simple(dir)
    if dir == os.curdir:
        # Strip the leading './' so output matches distutils' findall().
        strip_prefix = functools.partial(os.path.relpath, start=dir)
        results = map(strip_prefix, results)
    return list(results)
+
+
+monkey.patch_all()
diff --git a/setuptools/_vendor/__init__.py b/setuptools/_vendor/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/setuptools/_vendor/__init__.py
diff --git a/setuptools/_vendor/__pycache__/__init__.cpython-36.pyc b/setuptools/_vendor/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..f86f802
--- /dev/null
+++ b/setuptools/_vendor/__pycache__/__init__.cpython-36.pyc
Binary files differ
diff --git a/setuptools/_vendor/__pycache__/six.cpython-36.pyc b/setuptools/_vendor/__pycache__/six.cpython-36.pyc
new file mode 100644
index 0000000..afda210
--- /dev/null
+++ b/setuptools/_vendor/__pycache__/six.cpython-36.pyc
Binary files differ
diff --git a/setuptools/_vendor/packaging/__about__.py b/setuptools/_vendor/packaging/__about__.py
new file mode 100644
index 0000000..95d330e
--- /dev/null
+++ b/setuptools/_vendor/packaging/__about__.py
@@ -0,0 +1,21 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+__all__ = [
+    "__title__", "__summary__", "__uri__", "__version__", "__author__",
+    "__email__", "__license__", "__copyright__",
+]
+
+__title__ = "packaging"
+__summary__ = "Core utilities for Python packages"
+__uri__ = "https://github.com/pypa/packaging"
+
+__version__ = "16.8"
+
+__author__ = "Donald Stufft and individual contributors"
+__email__ = "donald@stufft.io"
+
+__license__ = "BSD or Apache License, Version 2.0"
+__copyright__ = "Copyright 2014-2016 %s" % __author__
diff --git a/setuptools/_vendor/packaging/__init__.py b/setuptools/_vendor/packaging/__init__.py
new file mode 100644
index 0000000..5ee6220
--- /dev/null
+++ b/setuptools/_vendor/packaging/__init__.py
@@ -0,0 +1,14 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+from .__about__ import (
+    __author__, __copyright__, __email__, __license__, __summary__, __title__,
+    __uri__, __version__
+)
+
+__all__ = [
+    "__title__", "__summary__", "__uri__", "__version__", "__author__",
+    "__email__", "__license__", "__copyright__",
+]
diff --git a/setuptools/_vendor/packaging/__pycache__/__about__.cpython-36.pyc b/setuptools/_vendor/packaging/__pycache__/__about__.cpython-36.pyc
new file mode 100644
index 0000000..d88ad13
--- /dev/null
+++ b/setuptools/_vendor/packaging/__pycache__/__about__.cpython-36.pyc
Binary files differ
diff --git a/setuptools/_vendor/packaging/__pycache__/__init__.cpython-36.pyc b/setuptools/_vendor/packaging/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..e9a878d
--- /dev/null
+++ b/setuptools/_vendor/packaging/__pycache__/__init__.cpython-36.pyc
Binary files differ
diff --git a/setuptools/_vendor/packaging/__pycache__/_compat.cpython-36.pyc b/setuptools/_vendor/packaging/__pycache__/_compat.cpython-36.pyc
new file mode 100644
index 0000000..ca59089
--- /dev/null
+++ b/setuptools/_vendor/packaging/__pycache__/_compat.cpython-36.pyc
Binary files differ
diff --git a/setuptools/_vendor/packaging/__pycache__/_structures.cpython-36.pyc b/setuptools/_vendor/packaging/__pycache__/_structures.cpython-36.pyc
new file mode 100644
index 0000000..2640f23
--- /dev/null
+++ b/setuptools/_vendor/packaging/__pycache__/_structures.cpython-36.pyc
Binary files differ
diff --git a/setuptools/_vendor/packaging/__pycache__/specifiers.cpython-36.pyc b/setuptools/_vendor/packaging/__pycache__/specifiers.cpython-36.pyc
new file mode 100644
index 0000000..c5139c2
--- /dev/null
+++ b/setuptools/_vendor/packaging/__pycache__/specifiers.cpython-36.pyc
Binary files differ
diff --git a/setuptools/_vendor/packaging/__pycache__/version.cpython-36.pyc b/setuptools/_vendor/packaging/__pycache__/version.cpython-36.pyc
new file mode 100644
index 0000000..d5f01d8
--- /dev/null
+++ b/setuptools/_vendor/packaging/__pycache__/version.cpython-36.pyc
Binary files differ
diff --git a/setuptools/_vendor/packaging/_compat.py b/setuptools/_vendor/packaging/_compat.py
new file mode 100644
index 0000000..210bb80
--- /dev/null
+++ b/setuptools/_vendor/packaging/_compat.py
@@ -0,0 +1,30 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+# flake8: noqa
+
+if PY3:
+    string_types = str,
+else:
+    string_types = basestring,
+
+
def with_metaclass(meta, *bases):
    """
    Create a base class with a metaclass.
    """
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            # Discard the dummy's bases ('this_bases') and build the real
            # class with 'meta' and the intended 'bases' captured in the
            # closure — portable across Python 2 and 3 metaclass syntax.
            return meta(name, bases, d)
    # Use type.__new__ directly so constructing the temporary class does not
    # itself invoke 'meta'; only subclassing the result does.
    return type.__new__(metaclass, 'temporary_class', (), {})
diff --git a/setuptools/_vendor/packaging/_structures.py b/setuptools/_vendor/packaging/_structures.py
new file mode 100644
index 0000000..ccc2786
--- /dev/null
+++ b/setuptools/_vendor/packaging/_structures.py
@@ -0,0 +1,68 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+
class Infinity(object):
    """Singleton that compares greater than every other object.

    Used as a sentinel when building version sort keys.
    """

    def __repr__(self):
        return "Infinity"

    def __hash__(self):
        return hash(repr(self))

    def __lt__(self, other):
        # Nothing exceeds Infinity.
        return False

    def __le__(self, other):
        return False

    def __eq__(self, other):
        # Equal only to other instances of this class (i.e. itself).
        return isinstance(other, type(self))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __gt__(self, other):
        # Infinity exceeds everything.
        return True

    def __ge__(self, other):
        return True

    def __neg__(self):
        return NegativeInfinity

# Replace the class with its sole instance; only the singleton is used.
Infinity = Infinity()
+
+
class NegativeInfinity(object):
    """Singleton that compares less than every other object.

    Mirror image of Infinity, used as a sentinel in version sort keys.
    """

    def __repr__(self):
        return "-Infinity"

    def __hash__(self):
        return hash(repr(self))

    def __lt__(self, other):
        # NegativeInfinity is below everything.
        return True

    def __le__(self, other):
        return True

    def __eq__(self, other):
        # Equal only to other instances of this class (i.e. itself).
        return isinstance(other, type(self))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __gt__(self, other):
        # Nothing is below NegativeInfinity.
        return False

    def __ge__(self, other):
        return False

    def __neg__(self):
        return Infinity

# Replace the class with its sole instance; only the singleton is used.
NegativeInfinity = NegativeInfinity()
diff --git a/setuptools/_vendor/packaging/markers.py b/setuptools/_vendor/packaging/markers.py
new file mode 100644
index 0000000..031332a
--- /dev/null
+++ b/setuptools/_vendor/packaging/markers.py
@@ -0,0 +1,301 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import operator
+import os
+import platform
+import sys
+
+from setuptools.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
+from setuptools.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
+from setuptools.extern.pyparsing import Literal as L  # noqa
+
+from ._compat import string_types
+from .specifiers import Specifier, InvalidSpecifier
+
+
+__all__ = [
+    "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
+    "Marker", "default_environment",
+]
+
+
class InvalidMarker(ValueError):
    """
    An invalid marker was found, users should refer to PEP 508.
    """
    # Raised by Marker.__init__ when the marker string fails to parse.
+
+
class UndefinedComparison(ValueError):
    """
    An invalid operation was attempted on a value that doesn't support it.
    """
    # Raised by _eval_op when the operator has no plain-comparison fallback.
+
+
class UndefinedEnvironmentName(ValueError):
    """
    A name was attempted to be used that does not exist inside of the
    environment.
    """
    # Raised by _get_env when a marker variable is missing from the
    # evaluation environment.
+
+
class Node(object):
    """Base AST node for parsed marker expressions; wraps one value."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return "<%s(%r)>" % (self.__class__.__name__, str(self))

    def serialize(self):
        # Subclasses decide how they re-serialize into marker syntax.
        raise NotImplementedError
+
+
class Variable(Node):
    # A marker variable such as 'python_version' or 'sys_platform'.

    def serialize(self):
        # Variables re-serialize as their bare (unquoted) name.
        return str(self)
+
+
class Value(Node):
    # A quoted literal operand from the marker expression.

    def serialize(self):
        # Values re-serialize wrapped in double quotes.
        return '"{0}"'.format(self)
+
+
class Op(Node):
    # A comparison or membership operator, e.g. '==', 'in', 'not in'.

    def serialize(self):
        return str(self)
+
+
# PEP 508 marker variable names.  pyparsing's '|' (MatchFirst) takes the
# first alternative that succeeds, so longer names precede their prefixes.
VARIABLE = (
    L("implementation_version") |
    L("platform_python_implementation") |
    L("implementation_name") |
    L("python_full_version") |
    L("platform_release") |
    L("platform_version") |
    L("platform_machine") |
    L("platform_system") |
    L("python_version") |
    L("sys_platform") |
    L("os_name") |
    L("os.name") |  # PEP-345
    L("sys.platform") |  # PEP-345
    L("platform.version") |  # PEP-345
    L("platform.machine") |  # PEP-345
    L("platform.python_implementation") |  # PEP-345
    L("python_implementation") |  # undocumented setuptools legacy
    L("extra")
)
# Legacy (PEP 345 dotted / setuptools) spellings mapped to PEP 508 names.
ALIASES = {
    'os.name': 'os_name',
    'sys.platform': 'sys_platform',
    'platform.version': 'platform_version',
    'platform.machine': 'platform_machine',
    'platform.python_implementation': 'platform_python_implementation',
    'python_implementation': 'platform_python_implementation'
}
# Normalize aliases at parse time and wrap in a Variable node.
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))

# Version comparison operators; '===' before '==' (and '>=' before '>')
# so MatchFirst prefers the longest operator.
VERSION_CMP = (
    L("===") |
    L("==") |
    L(">=") |
    L("<=") |
    L("!=") |
    L("~=") |
    L(">") |
    L("<")
)

MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))

# String literals may be single- or double-quoted.
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))

BOOLOP = L("and") | L("or")

MARKER_VAR = VARIABLE | MARKER_VALUE

# One comparison '<var-or-value> <op> <var-or-value>', collapsed to a
# (lhs, op, rhs) tuple of Node objects.
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))

LPAREN = L("(").suppress()
RPAREN = L(")").suppress()

# Full expression: atoms joined by and/or; parentheses create nested
# groups (hence the Forward declaration for the recursive reference).
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)

# A marker must consume the entire input string.
MARKER = stringStart + MARKER_EXPR + stringEnd
+
+
def _coerce_parse_result(results):
    """Recursively convert pyparsing ParseResults into plain nested lists.

    Leaves (Node instances and strings) are returned unchanged.
    """
    if not isinstance(results, ParseResults):
        return results
    return [_coerce_parse_result(item) for item in results]
+
+
def _format_marker(marker, first=True):
    """Serialize a coerced marker structure back into marker syntax.

    'marker' is either a boolean-operator string ("and"/"or"), a
    (Variable, Op, Value) tuple, or a list of those; 'first' tracks whether
    we are at the top level, where surrounding parentheses are unnecessary.
    """
    assert isinstance(marker, (list, tuple, string_types))

    # Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself it's own list. In that case we want skip
    # the rest of this function so that we don't get extraneous () on the
    # outside.
    if (isinstance(marker, list) and len(marker) == 1 and
            isinstance(marker[0], (list, tuple))):
        return _format_marker(marker[0])

    if isinstance(marker, list):
        # Recurse with first=False so nested groups get parenthesized.
        inner = (_format_marker(m, first=False) for m in marker)
        if first:
            return " ".join(inner)
        else:
            return "(" + " ".join(inner) + ")"
    elif isinstance(marker, tuple):
        # A single comparison: serialize variable, operator, and value.
        return " ".join([m.serialize() for m in marker])
    else:
        # A bare boolean operator ("and" / "or").
        return marker
+
+
+_operators = {
+    "in": lambda lhs, rhs: lhs in rhs,
+    "not in": lambda lhs, rhs: lhs not in rhs,
+    "<": operator.lt,
+    "<=": operator.le,
+    "==": operator.eq,
+    "!=": operator.ne,
+    ">=": operator.ge,
+    ">": operator.gt,
+}
+
+
def _eval_op(lhs, op, rhs):
    """Evaluate one marker comparison: 'lhs <op> rhs'.

    PEP 440 specifier semantics are tried first (so e.g. python_version
    comparisons behave as version comparisons, not string comparisons);
    if 'op + rhs' is not a valid specifier, fall back to the plain
    operator table, raising UndefinedComparison for operators with no
    fallback ('~=' with a non-version operand, for example).
    """
    try:
        spec = Specifier("".join([op.serialize(), rhs]))
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs)

    oper = _operators.get(op.serialize())
    if oper is None:
        raise UndefinedComparison(
            "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
        )

    return oper(lhs, rhs)
+
+
+_undefined = object()
+
+
def _get_env(environment, name):
    """Fetch marker variable 'name' from 'environment'.

    Raises UndefinedEnvironmentName rather than returning a default, so a
    typo'd or unsupported variable fails loudly.
    """
    missing = _undefined
    value = environment.get(name, missing)

    if value is missing:
        raise UndefinedEnvironmentName(
            "{0!r} does not exist in evaluation environment.".format(name)
        )

    return value
+
+
def _evaluate_markers(markers, environment):
    """Evaluate a coerced marker structure against 'environment'.

    'groups' represents an implicit top-level 'or' of 'and'-runs: each
    inner list collects consecutive 'and'-joined results, and the final
    answer is any(all(run)).  This reproduces Python precedence ('and'
    binds tighter than 'or') over the flat list the parser produces.
    """
    groups = [[]]

    for marker in markers:
        assert isinstance(marker, (list, tuple, string_types))

        if isinstance(marker, list):
            # Parenthesized sub-expression: evaluate recursively.
            groups[-1].append(_evaluate_markers(marker, environment))
        elif isinstance(marker, tuple):
            lhs, op, rhs = marker

            # Exactly one side is a Variable; resolve it from the
            # environment and compare it against the literal side.
            if isinstance(lhs, Variable):
                lhs_value = _get_env(environment, lhs.value)
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                rhs_value = _get_env(environment, rhs.value)

            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert marker in ["and", "or"]
            # 'or' starts a new conjunction run; 'and' extends the current.
            if marker == "or":
                groups.append([])

    return any(all(item) for item in groups)
+
+
def format_full_version(info):
    """Render a sys.implementation.version-style struct as a version string.

    Final releases yield 'X.Y.Z'; pre-releases append the first letter of
    the release level plus the serial (e.g. '3.7.0b4').
    """
    base = "{0}.{1}.{2}".format(info.major, info.minor, info.micro)
    if info.releaselevel == 'final':
        return base
    return base + info.releaselevel[0] + str(info.serial)
+
+
def default_environment():
    """Return the PEP 508 marker environment for the running interpreter."""
    implementation = getattr(sys, 'implementation', None)
    if implementation is None:
        # Python 2 has no sys.implementation; use the documented fallbacks.
        implementation_name = ''
        iver = '0'
    else:
        implementation_name = implementation.name
        iver = format_full_version(implementation.version)

    return {
        "implementation_name": implementation_name,
        "implementation_version": iver,
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        "python_version": platform.python_version()[:3],
        "sys_platform": sys.platform,
    }
+
+
class Marker(object):
    """A parsed PEP 508 environment marker, e.g. "python_version < '3'"."""

    def __init__(self, marker):
        try:
            # Parse and flatten into nested lists/tuples of Node objects.
            self._markers = _coerce_parse_result(MARKER.parseString(marker))
        except ParseException as e:
            # Include an 8-character window around the failure position.
            err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
                marker, marker[e.loc:e.loc + 8])
            raise InvalidMarker(err_str)

    def __str__(self):
        # Canonical re-serialization of the parsed structure.
        return _format_marker(self._markers)

    def __repr__(self):
        return "<Marker({0!r})>".format(str(self))

    def evaluate(self, environment=None):
        """Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        """
        current_environment = default_environment()
        if environment is not None:
            current_environment.update(environment)

        return _evaluate_markers(self._markers, current_environment)
diff --git a/setuptools/_vendor/packaging/requirements.py b/setuptools/_vendor/packaging/requirements.py
new file mode 100644
index 0000000..5b49341
--- /dev/null
+++ b/setuptools/_vendor/packaging/requirements.py
@@ -0,0 +1,127 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import string
+import re
+
+from setuptools.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
+from setuptools.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
+from setuptools.extern.pyparsing import Literal as L  # noqa
+from setuptools.extern.six.moves.urllib import parse as urlparse
+
+from .markers import MARKER_EXPR, Marker
+from .specifiers import LegacySpecifier, Specifier, SpecifierSet
+
+
class InvalidRequirement(ValueError):
    """
    An invalid requirement was found, users should refer to PEP 508.
    """
    # Raised by Requirement.__init__ on parse failures or malformed URLs.
+
+
ALPHANUM = Word(string.ascii_letters + string.digits)

LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()

# PEP 508 names: alphanumeric runs joined by '-', '_' or '.'; punctuation
# may not lead or trail the identifier.
PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))

NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER

# '@ <uri>' direct-reference form; any non-space run counts as the URL.
URI = Regex(r'[^ ]+')("url")
URL = (AT + URI)

EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")

# Reuse the vendored specifier regexes for individual version clauses.
VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)

VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
                       joinString=",", adjacent=False)("_raw_spec")
# Version clauses may optionally be parenthesized: 'foo (>=1.0)'.
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')

# Keep the specifier's original text (minus any surrounding parentheses).
VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])

# Rebind the imported MARKER_EXPR Forward: capture the raw marker text and
# construct a Marker object from it at parse time.
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
    lambda s, l, t: Marker(s[t._original_start:t._original_end])
)
# (sic: 'SEPERATOR' is the upstream spelling.)
MARKER_SEPERATOR = SEMICOLON
MARKER = MARKER_SEPERATOR + MARKER_EXPR

VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)

NAMED_REQUIREMENT = \
    NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)

# A requirement must consume the entire input string.
REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
+
+
class Requirement(object):
    """Parse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    """

    # TODO: Can we test whether something is contained within a requirement?
    #       If so how do we do that? Do we need to test against the _name_ of
    #       the thing as well as the version? What about the markers?
    # TODO: Can we normalize the name and extra name?

    def __init__(self, requirement_string):
        try:
            req = REQUIREMENT.parseString(requirement_string)
        except ParseException as e:
            # Include an 8-character window around the failure position.
            raise InvalidRequirement(
                "Invalid requirement, parse error at \"{0!r}\"".format(
                    requirement_string[e.loc:e.loc + 8]))

        self.name = req.name
        if req.url:
            parsed_url = urlparse.urlparse(req.url)
            # NOTE(review): the second clause is subsumed by the first (if
            # both scheme and netloc are missing, the first test already
            # fails), so the net effect is "reject unless scheme AND netloc
            # are present" — confirm against upstream intent before touching.
            if not (parsed_url.scheme and parsed_url.netloc) or (
                    not parsed_url.scheme and not parsed_url.netloc):
                raise InvalidRequirement("Invalid URL given")
            self.url = req.url
        else:
            self.url = None
        # Absent pieces fall back to empty set / empty SpecifierSet / None.
        self.extras = set(req.extras.asList() if req.extras else [])
        self.specifier = SpecifierSet(req.specifier)
        self.marker = req.marker if req.marker else None

    def __str__(self):
        # Re-serialize in canonical PEP 508 order:
        # name[extras]specifier@ url; marker
        parts = [self.name]

        if self.extras:
            parts.append("[{0}]".format(",".join(sorted(self.extras))))

        if self.specifier:
            parts.append(str(self.specifier))

        if self.url:
            parts.append("@ {0}".format(self.url))

        if self.marker:
            parts.append("; {0}".format(self.marker))

        return "".join(parts)

    def __repr__(self):
        return "<Requirement({0!r})>".format(str(self))
diff --git a/setuptools/_vendor/packaging/specifiers.py b/setuptools/_vendor/packaging/specifiers.py
new file mode 100644
index 0000000..7f5a76c
--- /dev/null
+++ b/setuptools/_vendor/packaging/specifiers.py
@@ -0,0 +1,774 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import abc
+import functools
+import itertools
+import re
+
+from ._compat import string_types, with_metaclass
+from .version import Version, LegacyVersion, parse
+
+
class InvalidSpecifier(ValueError):
    """
    Raised when a specifier string cannot be parsed; see PEP 440 for the
    accepted syntax.
    """
+
+
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
    """Abstract interface shared by Specifier, LegacySpecifier and
    SpecifierSet."""

    @abc.abstractmethod
    def __str__(self):
        """Return the canonical string form of this specifier."""

    @abc.abstractmethod
    def __hash__(self):
        """Return a hash so specifiers can be stored in sets and dicts."""

    @abc.abstractmethod
    def __eq__(self, other):
        """Return True if *other* denotes the same specifier."""

    @abc.abstractmethod
    def __ne__(self, other):
        """Return True if *other* denotes a different specifier."""

    @abc.abstractproperty
    def prereleases(self):
        """Whether this specifier, as a whole, allows pre-releases."""

    @prereleases.setter
    def prereleases(self, value):
        """Override whether pre-releases are allowed."""

    @abc.abstractmethod
    def contains(self, item, prereleases=None):
        """Return True if the version *item* satisfies this specifier."""

    @abc.abstractmethod
    def filter(self, iterable, prereleases=None):
        """Yield only the versions from *iterable* that satisfy this
        specifier."""
+
+
class _IndividualSpecifier(BaseSpecifier):
    """Base class for a single "<operator><version>" clause.

    Subclasses supply ``_regex`` (used to parse the clause) and
    ``_operators`` (mapping operator text to a ``_compare_<name>`` method
    suffix); this class implements the shared parsing, equality and
    containment machinery.
    """

    # Operator text -> ``_compare_*`` method suffix; filled in by subclasses.
    _operators = {}

    def __init__(self, spec="", prereleases=None):
        # Parse the clause with the subclass-provided regex; reject anything
        # that does not match in full.
        match = self._regex.search(spec)
        if not match:
            raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))

        # Normalized (operator, version) pair; surrounding whitespace is
        # insignificant.
        self._spec = (
            match.group("operator").strip(),
            match.group("version").strip(),
        )

        # Store whether or not this Specifier should accept prereleases.
        # None means "no explicit preference" (decided later by context).
        self._prereleases = prereleases

    def __repr__(self):
        pre = (
            ", prereleases={0!r}".format(self.prereleases)
            if self._prereleases is not None
            else ""
        )

        return "<{0}({1!r}{2})>".format(
            self.__class__.__name__,
            str(self),
            pre,
        )

    def __str__(self):
        return "{0}{1}".format(*self._spec)

    def __hash__(self):
        return hash(self._spec)

    def __eq__(self, other):
        # Strings are coerced so ``Specifier("==1.0") == "==1.0"`` works.
        if isinstance(other, string_types):
            try:
                other = self.__class__(other)
            except InvalidSpecifier:
                return NotImplemented
        elif not isinstance(other, self.__class__):
            return NotImplemented

        return self._spec == other._spec

    def __ne__(self, other):
        if isinstance(other, string_types):
            try:
                other = self.__class__(other)
            except InvalidSpecifier:
                return NotImplemented
        elif not isinstance(other, self.__class__):
            return NotImplemented

        return self._spec != other._spec

    def _get_operator(self, op):
        # Dispatch "==" -> self._compare_equal etc. via ``_operators``.
        return getattr(self, "_compare_{0}".format(self._operators[op]))

    def _coerce_version(self, version):
        # Accept either an already-parsed version object or a raw string.
        if not isinstance(version, (LegacyVersion, Version)):
            version = parse(version)
        return version

    @property
    def operator(self):
        # The operator half of the clause, e.g. ">=".
        return self._spec[0]

    @property
    def version(self):
        # The version half of the clause, e.g. "1.0".
        return self._spec[1]

    @property
    def prereleases(self):
        return self._prereleases

    @prereleases.setter
    def prereleases(self, value):
        self._prereleases = value

    def __contains__(self, item):
        return self.contains(item)

    def contains(self, item, prereleases=None):
        """Return True if version ``item`` satisfies this clause.

        ``prereleases``, when not None, overrides the specifier's own
        pre-release policy for this single check.
        """
        # Determine if prereleases are to be allowed or not.
        if prereleases is None:
            prereleases = self.prereleases

        # Normalize item to a Version or LegacyVersion, this allows us to have
        # a shortcut for ``"2.0" in Specifier(">=2")``.
        item = self._coerce_version(item)

        # Determine if we should be supporting prereleases in this specifier
        # or not, if we do not support prereleases then we can short circuit
        # logic if this version is a prerelease.
        if item.is_prerelease and not prereleases:
            return False

        # Actually do the comparison to determine if this item is contained
        # within this Specifier or not.
        return self._get_operator(self.operator)(item, self.version)

    def filter(self, iterable, prereleases=None):
        """Yield the members of ``iterable`` that satisfy this clause.

        Matching pre-releases are withheld and only yielded at the end, as
        a fallback, if nothing else matched and pre-releases were not
        explicitly enabled.
        """
        yielded = False
        found_prereleases = []

        kw = {"prereleases": prereleases if prereleases is not None else True}

        # Attempt to iterate over all the values in the iterable and if any of
        # them match, yield them.
        for version in iterable:
            parsed_version = self._coerce_version(version)

            if self.contains(parsed_version, **kw):
                # If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
                # else matches this specifier.
                if (parsed_version.is_prerelease and not
                        (prereleases or self.prereleases)):
                    found_prereleases.append(version)
                # Either this is not a prerelease, or we should have been
                # accepting prereleases from the beginning.
                else:
                    yielded = True
                    yield version

        # Now that we've iterated over everything, determine if we've yielded
        # any values, and if we have not and we have any prereleases stored up
        # then we will go ahead and yield the prereleases.
        if not yielded and found_prereleases:
            for version in found_prereleases:
                yield version
+
+
class LegacySpecifier(_IndividualSpecifier):
    """A specifier clause for non-PEP 440 ("legacy") version strings.

    All comparisons delegate to LegacyVersion, which orders versions with
    the historical setuptools scheme rather than PEP 440.
    """

    _regex_str = (
        r"""
        (?P<operator>(==|!=|<=|>=|<|>))
        \s*
        (?P<version>
            [^,;\s)]* # Since this is a "legacy" specifier, and the version
                      # string can be just about anything, we match everything
                      # except for whitespace, a semi-colon for marker support,
                      # a closing paren since versions can be enclosed in
                      # them, and a comma since it's a version separator.
        )
        """
    )

    _regex = re.compile(
        r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)

    # Operator text -> ``_compare_*`` method suffix.
    _operators = {
        "==": "equal",
        "!=": "not_equal",
        "<=": "less_than_equal",
        ">=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
    }

    def _coerce_version(self, version):
        # Everything, including PEP 440 versions, is compared as a
        # LegacyVersion here.
        if not isinstance(version, LegacyVersion):
            version = LegacyVersion(str(version))
        return version

    def _compare_equal(self, prospective, spec):
        return prospective == self._coerce_version(spec)

    def _compare_not_equal(self, prospective, spec):
        return prospective != self._coerce_version(spec)

    def _compare_less_than_equal(self, prospective, spec):
        return prospective <= self._coerce_version(spec)

    def _compare_greater_than_equal(self, prospective, spec):
        return prospective >= self._coerce_version(spec)

    def _compare_less_than(self, prospective, spec):
        return prospective < self._coerce_version(spec)

    def _compare_greater_than(self, prospective, spec):
        return prospective > self._coerce_version(spec)
+
+
def _require_version_compare(fn):
    """Decorator guarding PEP 440 comparisons.

    A legacy (non PEP 440) version can never satisfy a modern Specifier,
    so the wrapped comparison short-circuits to False unless *prospective*
    is a real Version instance.
    """
    @functools.wraps(fn)
    def guarded(self, prospective, spec):
        if not isinstance(prospective, Version):
            return False
        return fn(self, prospective, spec)
    return guarded
+
+
class Specifier(_IndividualSpecifier):
    """A single PEP 440 specifier clause, e.g. ``>=1.0`` or ``~=2.2``."""

    _regex_str = (
        r"""
        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
        (?P<version>
            (?:
                # The identity operators allow for an escape hatch that will
                # do an exact string match of the version you wish to install.
                # This will not be parsed by PEP 440 and we cannot determine
                # any semantic meaning from it. This operator is discouraged
                # but included entirely as an escape hatch.
                (?<====)  # Only match for the identity operator
                \s*
                [^\s]*    # We just match everything, except for whitespace
                          # since we are only testing for strict identity.
            )
            |
            (?:
                # The (non)equality operators allow for wild card and local
                # versions to be specified so we have to define these two
                # operators separately to enable that.
                (?<===|!=)            # Only match for equals and not equals

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?

                # You cannot use a wild card and a dev or local version
                # together so group them with a | and make them optional.
                (?:
                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
                    |
                    \.\*  # Wild card syntax of .*
                )?
            )
            |
            (?:
                # The compatible operator requires at least two digits in the
                # release segment.
                (?<=~=)               # Only match for the compatible operator

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
            )
            |
            (?:
                # All other operators only allow a sub set of what the
                # (non)equality operators do. Specifically they do not allow
                # local versions to be specified nor do they allow the prefix
                # matching wild cards.
                (?<!==|!=|~=)         # We have special cases for these
                                      # operators so we want to make sure they
                                      # don't match here.

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release
                (?:                   # pre release
                    [-_\.]?
                    (a|b|c|rc|alpha|beta|pre|preview)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
            )
        )
        """
    )

    _regex = re.compile(
        r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)

    # Operator text -> ``_compare_*`` method suffix.
    _operators = {
        "~=": "compatible",
        "==": "equal",
        "!=": "not_equal",
        "<=": "less_than_equal",
        ">=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
        "===": "arbitrary",
    }

    @_require_version_compare
    def _compare_compatible(self, prospective, spec):
        """Implement ``~=`` (compatible release) per PEP 440."""
        # Compatible releases have an equivalent combination of >= and ==. That
        # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
        # implement this in terms of the other specifiers instead of
        # implementing it ourselves. The only thing we need to do is construct
        # the other specifiers.

        # We want everything but the last item in the version, but we want to
        # ignore post and dev releases and we want to treat the pre-release as
        # its own separate segment.
        prefix = ".".join(
            list(
                itertools.takewhile(
                    lambda x: (not x.startswith("post") and not
                               x.startswith("dev")),
                    _version_split(spec),
                )
            )[:-1]
        )

        # Add the prefix notation to the end of our string
        prefix += ".*"

        return (self._get_operator(">=")(prospective, spec) and
                self._get_operator("==")(prospective, prefix))

    @_require_version_compare
    def _compare_equal(self, prospective, spec):
        """Implement ``==``, including ``.*`` prefix matching."""
        # We need special logic to handle prefix matching
        if spec.endswith(".*"):
            # In the case of prefix matching we want to ignore local segment.
            prospective = Version(prospective.public)
            # Split the spec out by dots, and pretend that there is an implicit
            # dot in between a release segment and a pre-release segment.
            spec = _version_split(spec[:-2])  # Remove the trailing .*

            # Split the prospective version out by dots, and pretend that there
            # is an implicit dot in between a release segment and a pre-release
            # segment.
            prospective = _version_split(str(prospective))

            # Shorten the prospective version to be the same length as the spec
            # so that we can determine if the specifier is a prefix of the
            # prospective version or not.
            prospective = prospective[:len(spec)]

            # Pad out our two sides with zeros so that they both equal the same
            # length.
            spec, prospective = _pad_version(spec, prospective)
        else:
            # Convert our spec string into a Version
            spec = Version(spec)

            # If the specifier does not have a local segment, then we want to
            # act as if the prospective version also does not have a local
            # segment.
            if not spec.local:
                prospective = Version(prospective.public)

        return prospective == spec

    @_require_version_compare
    def _compare_not_equal(self, prospective, spec):
        return not self._compare_equal(prospective, spec)

    @_require_version_compare
    def _compare_less_than_equal(self, prospective, spec):
        return prospective <= Version(spec)

    @_require_version_compare
    def _compare_greater_than_equal(self, prospective, spec):
        return prospective >= Version(spec)

    @_require_version_compare
    def _compare_less_than(self, prospective, spec):
        # Convert our spec to a Version instance, since we'll want to work with
        # it as a version.
        spec = Version(spec)

        # Check to see if the prospective version is less than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective < spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes a pre-release version, we do not accept pre-release
        # versions for the version mentioned in the specifier (e.g. <3.1 should
        # not match 3.1.dev0, but should match 3.0.dev0).
        if not spec.is_prerelease and prospective.is_prerelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten to here, it means that prospective version is both
        # less than the spec version *and* it's not a pre-release of the same
        # version in the spec.
        return True

    @_require_version_compare
    def _compare_greater_than(self, prospective, spec):
        # Convert our spec to a Version instance, since we'll want to work with
        # it as a version.
        spec = Version(spec)

        # Check to see if the prospective version is greater than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective > spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes a post-release version, we do not accept
        # post-release versions for the version mentioned in the specifier
        # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
        if not spec.is_postrelease and prospective.is_postrelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # Ensure that we do not allow a local version of the version mentioned
        # in the specifier, which is technically greater than, to match.
        if prospective.local is not None:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten to here, it means that prospective version is both
        # greater than the spec version *and* it's not a pre-release of the
        # same version in the spec.
        return True

    def _compare_arbitrary(self, prospective, spec):
        # ``===``: a case-insensitive literal string comparison, no parsing.
        return str(prospective).lower() == str(spec).lower()

    @property
    def prereleases(self):
        """Whether this clause allows pre-release versions.

        Explicit overrides win; otherwise inclusive operators that name a
        pre-release (e.g. ``>=1.0rc1``) implicitly allow pre-releases.
        """
        # If there is an explicit prereleases set for this, then we'll just
        # blindly use that.
        if self._prereleases is not None:
            return self._prereleases

        # Look at all of our specifiers and determine if they are inclusive
        # operators, and if they are if they are including an explicit
        # prerelease.
        operator, version = self._spec
        if operator in ["==", ">=", "<=", "~=", "==="]:
            # The == specifier can include a trailing .*, if it does we
            # want to remove before parsing.
            if operator == "==" and version.endswith(".*"):
                version = version[:-2]

            # Parse the version, and if it is a pre-release then this
            # specifier allows pre-releases.
            if parse(version).is_prerelease:
                return True

        return False

    @prereleases.setter
    def prereleases(self, value):
        self._prereleases = value
+
+
# Matches a numeric component with a directly attached pre-release tag,
# e.g. "0rc1" -> ("0", "rc1").
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")


def _version_split(version):
    """Split a version string on dots, additionally separating a numeric
    component from an attached pre-release tag (``"1.0rc1"`` ->
    ``["1", "0", "rc1"]``)."""
    parts = []
    for piece in version.split("."):
        attached = _prefix_regex.search(piece)
        # A match yields the (number, tag) pair; otherwise keep the piece.
        parts.extend(attached.groups() if attached else (piece,))
    return parts
+
+
def _pad_version(left, right):
    """Zero-pad the shorter release segment so *left* and *right* compare
    component-for-component (e.g. ``1.2`` vs ``1.2.3`` becomes ``1.2.0``
    vs ``1.2.3``).

    Both arguments are component lists as produced by ``_version_split``;
    a tuple of the two padded lists is returned.
    """
    # The leading run of purely numeric components is the release segment;
    # everything after it (pre/post/dev/local parts) is left untouched.
    left_release = list(itertools.takewhile(lambda p: p.isdigit(), left))
    right_release = list(itertools.takewhile(lambda p: p.isdigit(), right))

    # Pad whichever release segment is shorter with "0" components.
    left_pad = ["0"] * max(0, len(right_release) - len(left_release))
    right_pad = ["0"] * max(0, len(left_release) - len(right_release))

    return (
        left_release + left_pad + list(left[len(left_release):]),
        right_release + right_pad + list(right[len(right_release):]),
    )
+
+
class SpecifierSet(BaseSpecifier):
    """The intersection ("AND") of individual specifier clauses, parsed
    from a comma-separated string such as ``">=1.0,!=1.3"``.
    """

    def __init__(self, specifiers="", prereleases=None):
        # Split on , to break each individual specifier into its own item, and
        # strip each item to remove leading/trailing whitespace.
        specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]

        # Parse each individual specifier, attempting first to make it a
        # Specifier and falling back to a LegacySpecifier.
        parsed = set()
        for specifier in specifiers:
            try:
                parsed.add(Specifier(specifier))
            except InvalidSpecifier:
                parsed.add(LegacySpecifier(specifier))

        # Turn our parsed specifiers into a frozen set and save them for later.
        self._specs = frozenset(parsed)

        # Store our prereleases value so we can use it later to determine if
        # we accept prereleases or not.
        self._prereleases = prereleases

    def __repr__(self):
        pre = (
            ", prereleases={0!r}".format(self.prereleases)
            if self._prereleases is not None
            else ""
        )

        return "<SpecifierSet({0!r}{1})>".format(str(self), pre)

    def __str__(self):
        # Sorted for a deterministic, canonical rendering.
        return ",".join(sorted(str(s) for s in self._specs))

    def __hash__(self):
        return hash(self._specs)

    def __and__(self, other):
        """Return a new SpecifierSet combining the clauses of both sets."""
        if isinstance(other, string_types):
            other = SpecifierSet(other)
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        specifier = SpecifierSet()
        specifier._specs = frozenset(self._specs | other._specs)

        # Merge the pre-release overrides; conflicting explicit True/False
        # overrides cannot be reconciled and are an error.
        if self._prereleases is None and other._prereleases is not None:
            specifier._prereleases = other._prereleases
        elif self._prereleases is not None and other._prereleases is None:
            specifier._prereleases = self._prereleases
        elif self._prereleases == other._prereleases:
            specifier._prereleases = self._prereleases
        else:
            raise ValueError(
                "Cannot combine SpecifierSets with True and False prerelease "
                "overrides."
            )

        return specifier

    def __eq__(self, other):
        # Strings and single specifiers are coerced to sets for comparison.
        if isinstance(other, string_types):
            other = SpecifierSet(other)
        elif isinstance(other, _IndividualSpecifier):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs == other._specs

    def __ne__(self, other):
        if isinstance(other, string_types):
            other = SpecifierSet(other)
        elif isinstance(other, _IndividualSpecifier):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs != other._specs

    def __len__(self):
        return len(self._specs)

    def __iter__(self):
        return iter(self._specs)

    @property
    def prereleases(self):
        # If we have been given an explicit prerelease modifier, then we'll
        # pass that through here.
        if self._prereleases is not None:
            return self._prereleases

        # If we don't have any specifiers, and we don't have a forced value,
        # then we'll just return None since we don't know if this should have
        # pre-releases or not.
        if not self._specs:
            return None

        # Otherwise we'll see if any of the given specifiers accept
        # prereleases, if any of them do we'll return True, otherwise False.
        return any(s.prereleases for s in self._specs)

    @prereleases.setter
    def prereleases(self, value):
        self._prereleases = value

    def __contains__(self, item):
        return self.contains(item)

    def contains(self, item, prereleases=None):
        """Return True if ``item`` satisfies every clause in this set."""
        # Ensure that our item is a Version or LegacyVersion instance.
        if not isinstance(item, (LegacyVersion, Version)):
            item = parse(item)

        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # We can determine if we're going to allow pre-releases by looking to
        # see if any of the underlying items supports them. If none of them do
        # and this item is a pre-release then we do not allow it and we can
        # short circuit that here.
        # Note: This means that 1.0.dev1 would not be contained in something
        #       like >=1.0.devabc however it would be in >=1.0.devabc,>0.0.dev0
        if not prereleases and item.is_prerelease:
            return False

        # We simply dispatch to the underlying specs here to make sure that the
        # given version is contained within all of them.
        # Note: This use of all() here means that an empty set of specifiers
        #       will always return True, this is an explicit design decision.
        return all(
            s.contains(item, prereleases=prereleases)
            for s in self._specs
        )

    def filter(self, iterable, prereleases=None):
        """Filter ``iterable`` down to the versions satisfying every clause.

        With no clauses at all, legacy versions are dropped and
        pre-releases are only returned when nothing final matched (or when
        explicitly enabled).
        """
        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # If we have any specifiers, then we want to wrap our iterable in the
        # filter method for each one, this will act as a logical AND amongst
        # each specifier.
        if self._specs:
            for spec in self._specs:
                iterable = spec.filter(iterable, prereleases=bool(prereleases))
            return iterable
        # If we do not have any specifiers, then we need to have a rough filter
        # which will filter out any pre-releases, unless there are no final
        # releases, and which will filter out LegacyVersion in general.
        else:
            filtered = []
            found_prereleases = []

            for item in iterable:
                # Ensure that we have some kind of Version class for this item.
                if not isinstance(item, (LegacyVersion, Version)):
                    parsed_version = parse(item)
                else:
                    parsed_version = item

                # Filter out any item which is parsed as a LegacyVersion
                if isinstance(parsed_version, LegacyVersion):
                    continue

                # Store any item which is a pre-release for later unless we've
                # already found a final version or we are accepting prereleases
                if parsed_version.is_prerelease and not prereleases:
                    if not filtered:
                        found_prereleases.append(item)
                else:
                    filtered.append(item)

            # If we've found no items except for pre-releases, then we'll go
            # ahead and use the pre-releases
            if not filtered and found_prereleases and prereleases is None:
                return found_prereleases

            return filtered
diff --git a/setuptools/_vendor/packaging/utils.py b/setuptools/_vendor/packaging/utils.py
new file mode 100644
index 0000000..942387c
--- /dev/null
+++ b/setuptools/_vendor/packaging/utils.py
@@ -0,0 +1,14 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import re
+
+
# Runs of the PEP 503 separator characters, collapsed to one dash below.
_canonicalize_regex = re.compile(r"[-_.]+")


def canonicalize_name(name):
    """Normalize a project name per PEP 503: runs of ``-``, ``_`` and ``.``
    collapse to a single dash and the result is lowercased."""
    return _canonicalize_regex.sub("-", name).lower()
diff --git a/setuptools/_vendor/packaging/version.py b/setuptools/_vendor/packaging/version.py
new file mode 100644
index 0000000..83b5ee8
--- /dev/null
+++ b/setuptools/_vendor/packaging/version.py
@@ -0,0 +1,393 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import collections
+import itertools
+import re
+
+from ._structures import Infinity
+
+
+__all__ = [
+    "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
+]
+
+
+_Version = collections.namedtuple(
+    "_Version",
+    ["epoch", "release", "dev", "pre", "post", "local"],
+)
+
+
+def parse(version):
+    """
+    Parse the given version string and return either a :class:`Version` object
+    or a :class:`LegacyVersion` object depending on if the given version is
+    a valid PEP 440 version or a legacy version.
+    """
+    try:
+        return Version(version)
+    except InvalidVersion:
+        return LegacyVersion(version)
+
+
+class InvalidVersion(ValueError):
+    """
+    An invalid version was found, users should refer to PEP 440.
+    """
+
+
+class _BaseVersion(object):
+
+    def __hash__(self):
+        return hash(self._key)
+
+    def __lt__(self, other):
+        return self._compare(other, lambda s, o: s < o)
+
+    def __le__(self, other):
+        return self._compare(other, lambda s, o: s <= o)
+
+    def __eq__(self, other):
+        return self._compare(other, lambda s, o: s == o)
+
+    def __ge__(self, other):
+        return self._compare(other, lambda s, o: s >= o)
+
+    def __gt__(self, other):
+        return self._compare(other, lambda s, o: s > o)
+
+    def __ne__(self, other):
+        return self._compare(other, lambda s, o: s != o)
+
+    def _compare(self, other, method):
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+
+    def __init__(self, version):
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+    def __str__(self):
+        return self._version
+
+    def __repr__(self):
+        return "<LegacyVersion({0})>".format(repr(str(self)))
+
+    @property
+    def public(self):
+        return self._version
+
+    @property
+    def base_version(self):
+        return self._version
+
+    @property
+    def local(self):
+        return None
+
+    @property
+    def is_prerelease(self):
+        return False
+
+    @property
+    def is_postrelease(self):
+        return False
+
+
+_legacy_version_component_re = re.compile(
+    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
+)
+
+_legacy_version_replacement_map = {
+    "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+    for part in _legacy_version_component_re.split(s):
+        part = _legacy_version_replacement_map.get(part, part)
+
+        if not part or part == ".":
+            continue
+
+        if part[:1] in "0123456789":
+            # pad for numeric comparison
+            yield part.zfill(8)
+        else:
+            yield "*" + part
+
+    # ensure that alpha/beta/candidate are before final
+    yield "*final"
+
+
+def _legacy_cmpkey(version):
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
+    # greater than or equal to 0. This will effectively put the LegacyVersion,
+    # which uses the defacto standard originally implemented by setuptools,
+    # as before all PEP 440 versions.
+    epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version setuptools prior to
+    # it's adoption of the packaging library.
+    parts = []
+    for part in _parse_version_parts(version.lower()):
+        if part.startswith("*"):
+            # remove "-" before a prerelease tag
+            if part < "*final":
+                while parts and parts[-1] == "*final-":
+                    parts.pop()
+
+            # remove trailing zeros from each series of numeric parts
+            while parts and parts[-1] == "00000000":
+                parts.pop()
+
+        parts.append(part)
+    parts = tuple(parts)
+
+    return epoch, parts
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(
+        r"^\s*" + VERSION_PATTERN + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    def __init__(self, version):
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(
+                match.group("pre_l"),
+                match.group("pre_n"),
+            ),
+            post=_parse_letter_version(
+                match.group("post_l"),
+                match.group("post_n1") or match.group("post_n2"),
+            ),
+            dev=_parse_letter_version(
+                match.group("dev_l"),
+                match.group("dev_n"),
+            ),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return "<Version({0})>".format(repr(str(self)))
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        # Pre-release
+        if self._version.pre is not None:
+            parts.append("".join(str(x) for x in self._version.pre))
+
+        # Post-release
+        if self._version.post is not None:
+            parts.append(".post{0}".format(self._version.post[1]))
+
+        # Development release
+        if self._version.dev is not None:
+            parts.append(".dev{0}".format(self._version.dev[1]))
+
+        # Local version segment
+        if self._version.local is not None:
+            parts.append(
+                "+{0}".format(".".join(str(x) for x in self._version.local))
+            )
+
+        return "".join(parts)
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        return "".join(parts)
+
+    @property
+    def local(self):
+        version_string = str(self)
+        if "+" in version_string:
+            return version_string.split("+", 1)[1]
+
+    @property
+    def is_prerelease(self):
+        return bool(self._version.dev or self._version.pre)
+
+    @property
+    def is_postrelease(self):
+        return bool(self._version.post)
+
+
+def _parse_letter_version(letter, number):
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+
+_local_version_seperators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_seperators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll use a reverse the list, drop all the now
+    # leading zeros until we come to something non zero, then take the rest
+    # re-reverse it back into the correct order and make it a tuple and use
+    # that for our sorting key.
+    release = tuple(
+        reversed(list(
+            itertools.dropwhile(
+                lambda x: x == 0,
+                reversed(release),
+            )
+        ))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple(
+            (i, "") if isinstance(i, int) else (-Infinity, i)
+            for i in local
+        )
+
+    return epoch, release, pre, post, dev, local
diff --git a/setuptools/_vendor/pyparsing.py b/setuptools/_vendor/pyparsing.py
new file mode 100644
index 0000000..a212243
--- /dev/null
+++ b/setuptools/_vendor/pyparsing.py
@@ -0,0 +1,5696 @@
+# module pyparsing.py

+#

+# Copyright (c) 2003-2016  Paul T. McGuire

+#

+# Permission is hereby granted, free of charge, to any person obtaining

+# a copy of this software and associated documentation files (the

+# "Software"), to deal in the Software without restriction, including

+# without limitation the rights to use, copy, modify, merge, publish,

+# distribute, sublicense, and/or sell copies of the Software, and to

+# permit persons to whom the Software is furnished to do so, subject to

+# the following conditions:

+#

+# The above copyright notice and this permission notice shall be

+# included in all copies or substantial portions of the Software.

+#

+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,

+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF

+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.

+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY

+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,

+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE

+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+#

+

+__doc__ = \

+"""

+pyparsing module - Classes and methods to define and execute parsing grammars

+

+The pyparsing module is an alternative approach to creating and executing simple grammars,

+vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you

+don't need to learn a new syntax for defining grammars or matching expressions - the parsing module

+provides a library of classes that you use to construct the grammar directly in Python.

+

+Here is a program to parse "Hello, World!" (or any greeting of the form 

+C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements 

+(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to

+L{Literal} expressions)::

+

+    from pyparsing import Word, alphas

+

+    # define grammar of a greeting

+    greet = Word(alphas) + "," + Word(alphas) + "!"

+

+    hello = "Hello, World!"

+    print (hello, "->", greet.parseString(hello))

+

+The program outputs the following::

+

+    Hello, World! -> ['Hello', ',', 'World', '!']

+

+The Python representation of the grammar is quite readable, owing to the self-explanatory

+class names, and the use of '+', '|' and '^' operators.

+

+The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an

+object with named attributes.

+

+The pyparsing module handles some of the problems that are typically vexing when writing text parsers:

+ - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)

+ - quoted strings

+ - embedded comments

+"""

+

+__version__ = "2.1.10"

+__versionTime__ = "07 Oct 2016 01:31 UTC"

+__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"

+

+import string

+from weakref import ref as wkref

+import copy

+import sys

+import warnings

+import re

+import sre_constants

+import collections

+import pprint

+import traceback

+import types

+from datetime import datetime

+

+try:

+    from _thread import RLock

+except ImportError:

+    from threading import RLock

+

+try:

+    from collections import OrderedDict as _OrderedDict

+except ImportError:

+    try:

+        from ordereddict import OrderedDict as _OrderedDict

+    except ImportError:

+        _OrderedDict = None

+

+#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )

+

+__all__ = [

+'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',

+'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',

+'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',

+'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',

+'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',

+'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 

+'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',

+'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',

+'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',

+'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',

+'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',

+'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',

+'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',

+'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 

+'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',

+'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',

+'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',

+'CloseMatch', 'tokenMap', 'pyparsing_common',

+]

+

+system_version = tuple(sys.version_info)[:3]

+PY_3 = system_version[0] == 3

+if PY_3:

+    _MAX_INT = sys.maxsize

+    basestring = str

+    unichr = chr

+    _ustr = str

+

+    # build list of single arg builtins, that can be used as parse actions

+    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]

+

+else:

+    _MAX_INT = sys.maxint

+    range = xrange

+

+    def _ustr(obj):

+        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries

+           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It

+           then < returns the unicode object | encodes it with the default encoding | ... >.

+        """

+        if isinstance(obj,unicode):

+            return obj

+

+        try:

+            # If this works, then _ustr(obj) has the same behaviour as str(obj), so

+            # it won't break any existing code.

+            return str(obj)

+

+        except UnicodeEncodeError:

+            # Else encode it

+            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')

+            xmlcharref = Regex('&#\d+;')

+            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])

+            return xmlcharref.transformString(ret)

+

+    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions

+    singleArgBuiltins = []

+    import __builtin__

+    for fname in "sum len sorted reversed list tuple set any all min max".split():

+        try:

+            singleArgBuiltins.append(getattr(__builtin__,fname))

+        except AttributeError:

+            continue

+            

+_generatorType = type((y for y in range(1)))

+ 

+def _xml_escape(data):

+    """Escape &, <, >, ", ', etc. in a string of data."""

+

+    # ampersand must be replaced first

+    from_symbols = '&><"\''

+    to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())

+    for from_,to_ in zip(from_symbols, to_symbols):

+        data = data.replace(from_, to_)

+    return data

+

+class _Constants(object):

+    pass

+

+alphas     = string.ascii_uppercase + string.ascii_lowercase

+nums       = "0123456789"

+hexnums    = nums + "ABCDEFabcdef"

+alphanums  = alphas + nums

+_bslash    = chr(92)

+printables = "".join(c for c in string.printable if c not in string.whitespace)

+

+class ParseBaseException(Exception):

+    """base exception class for all parsing runtime exceptions"""

+    # Performance tuning: we construct a *lot* of these, so keep this

+    # constructor as small and fast as possible

+    def __init__( self, pstr, loc=0, msg=None, elem=None ):

+        self.loc = loc

+        if msg is None:

+            self.msg = pstr

+            self.pstr = ""

+        else:

+            self.msg = msg

+            self.pstr = pstr

+        self.parserElement = elem

+        self.args = (pstr, loc, msg)

+

+    @classmethod

+    def _from_exception(cls, pe):

+        """

+        internal factory method to simplify creating one type of ParseException 

+        from another - avoids having __init__ signature conflicts among subclasses

+        """

+        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)

+

+    def __getattr__( self, aname ):

+        """supported attributes by name are:

+            - lineno - returns the line number of the exception text

+            - col - returns the column number of the exception text

+            - line - returns the line containing the exception text

+        """

+        if( aname == "lineno" ):

+            return lineno( self.loc, self.pstr )

+        elif( aname in ("col", "column") ):

+            return col( self.loc, self.pstr )

+        elif( aname == "line" ):

+            return line( self.loc, self.pstr )

+        else:

+            raise AttributeError(aname)

+

+    def __str__( self ):

+        return "%s (at char %d), (line:%d, col:%d)" % \

+                ( self.msg, self.loc, self.lineno, self.column )

+    def __repr__( self ):

+        return _ustr(self)

+    def markInputline( self, markerString = ">!<" ):

+        """Extracts the exception line from the input string, and marks

+           the location of the exception with a special symbol.

+        """

+        line_str = self.line

+        line_column = self.column - 1

+        if markerString:

+            line_str = "".join((line_str[:line_column],

+                                markerString, line_str[line_column:]))

+        return line_str.strip()

+    def __dir__(self):

+        return "lineno col line".split() + dir(type(self))

+

+class ParseException(ParseBaseException):

+    """

+    Exception thrown when parse expressions don't match class;

+    supported attributes by name are:

+     - lineno - returns the line number of the exception text

+     - col - returns the column number of the exception text

+     - line - returns the line containing the exception text

+        

+    Example::

+        try:

+            Word(nums).setName("integer").parseString("ABC")

+        except ParseException as pe:

+            print(pe)

+            print("column: {}".format(pe.col))

+            

+    prints::

+       Expected integer (at char 0), (line:1, col:1)

+        column: 1

+    """

+    pass

+

+class ParseFatalException(ParseBaseException):

+    """user-throwable exception thrown when inconsistent parse content

+       is found; stops all parsing immediately"""

+    pass

+

+class ParseSyntaxException(ParseFatalException):

+    """just like L{ParseFatalException}, but thrown internally when an

+       L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop 

+       immediately because an unbacktrackable syntax error has been found"""

+    pass

+

+#~ class ReparseException(ParseBaseException):

+    #~ """Experimental class - parse actions can raise this exception to cause

+       #~ pyparsing to reparse the input string:

+        #~ - with a modified input string, and/or

+        #~ - with a modified start location

+       #~ Set the values of the ReparseException in the constructor, and raise the

+       #~ exception in a parse action to cause pyparsing to use the new string/location.

+       #~ Setting the values as None causes no change to be made.

+       #~ """

+    #~ def __init_( self, newstring, restartLoc ):

+        #~ self.newParseText = newstring

+        #~ self.reparseLoc = restartLoc

+

+class RecursiveGrammarException(Exception):

+    """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""

+    def __init__( self, parseElementList ):

+        self.parseElementTrace = parseElementList

+

+    def __str__( self ):

+        return "RecursiveGrammarException: %s" % self.parseElementTrace

+

+class _ParseResultsWithOffset(object):

+    def __init__(self,p1,p2):

+        self.tup = (p1,p2)

+    def __getitem__(self,i):

+        return self.tup[i]

+    def __repr__(self):

+        return repr(self.tup[0])

+    def setOffset(self,i):

+        self.tup = (self.tup[0],i)

+

+class ParseResults(object):

+    """

+    Structured parse results, to provide multiple means of access to the parsed data:

+       - as a list (C{len(results)})

+       - by list index (C{results[0], results[1]}, etc.)

+       - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})

+

+    Example::

+        integer = Word(nums)

+        date_str = (integer.setResultsName("year") + '/' 

+                        + integer.setResultsName("month") + '/' 

+                        + integer.setResultsName("day"))

+        # equivalent form:

+        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

+

+        # parseString returns a ParseResults object

+        result = date_str.parseString("1999/12/31")

+

+        def test(s, fn=repr):

+            print("%s -> %s" % (s, fn(eval(s))))

+        test("list(result)")

+        test("result[0]")

+        test("result['month']")

+        test("result.day")

+        test("'month' in result")

+        test("'minutes' in result")

+        test("result.dump()", str)

+    prints::

+        list(result) -> ['1999', '/', '12', '/', '31']

+        result[0] -> '1999'

+        result['month'] -> '12'

+        result.day -> '31'

+        'month' in result -> True

+        'minutes' in result -> False

+        result.dump() -> ['1999', '/', '12', '/', '31']

+        - day: 31

+        - month: 12

+        - year: 1999

+    """

+    def __new__(cls, toklist=None, name=None, asList=True, modal=True ):

+        if isinstance(toklist, cls):

+            return toklist

+        retobj = object.__new__(cls)

+        retobj.__doinit = True

+        return retobj

+

+    # Performance tuning: we construct a *lot* of these, so keep this

+    # constructor as small and fast as possible

+    def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):

+        if self.__doinit:

+            self.__doinit = False

+            self.__name = None

+            self.__parent = None

+            self.__accumNames = {}

+            self.__asList = asList

+            self.__modal = modal

+            if toklist is None:

+                toklist = []

+            if isinstance(toklist, list):

+                self.__toklist = toklist[:]

+            elif isinstance(toklist, _generatorType):

+                self.__toklist = list(toklist)

+            else:

+                self.__toklist = [toklist]

+            self.__tokdict = dict()

+

+        if name is not None and name:

+            if not modal:

+                self.__accumNames[name] = 0

+            if isinstance(name,int):

+                name = _ustr(name) # will always return a str, but use _ustr for consistency

+            self.__name = name

+            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):

+                if isinstance(toklist,basestring):

+                    toklist = [ toklist ]

+                if asList:

+                    if isinstance(toklist,ParseResults):

+                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)

+                    else:

+                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)

+                    self[name].__name = name

+                else:

+                    try:

+                        self[name] = toklist[0]

+                    except (KeyError,TypeError,IndexError):

+                        self[name] = toklist

+

+    def __getitem__( self, i ):

+        if isinstance( i, (int,slice) ):

+            return self.__toklist[i]

+        else:

+            if i not in self.__accumNames:

+                return self.__tokdict[i][-1][0]

+            else:

+                return ParseResults([ v[0] for v in self.__tokdict[i] ])

+

+    def __setitem__( self, k, v, isinstance=isinstance ):

+        if isinstance(v,_ParseResultsWithOffset):

+            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]

+            sub = v[0]

+        elif isinstance(k,(int,slice)):

+            self.__toklist[k] = v

+            sub = v

+        else:

+            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]

+            sub = v

+        if isinstance(sub,ParseResults):

+            sub.__parent = wkref(self)

+

+    def __delitem__( self, i ):

+        if isinstance(i,(int,slice)):

+            mylen = len( self.__toklist )

+            del self.__toklist[i]

+

+            # convert int to slice

+            if isinstance(i, int):

+                if i < 0:

+                    i += mylen

+                i = slice(i, i+1)

+            # get removed indices

+            removed = list(range(*i.indices(mylen)))

+            removed.reverse()

+            # fixup indices in token dictionary

+            for name,occurrences in self.__tokdict.items():

+                for j in removed:

+                    for k, (value, position) in enumerate(occurrences):

+                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))

+        else:

+            del self.__tokdict[i]

+

+    def __contains__( self, k ):

+        return k in self.__tokdict

+

+    def __len__( self ): return len( self.__toklist )

+    def __bool__(self): return ( not not self.__toklist )

+    __nonzero__ = __bool__

+    def __iter__( self ): return iter( self.__toklist )

+    def __reversed__( self ): return iter( self.__toklist[::-1] )

+    def _iterkeys( self ):

+        if hasattr(self.__tokdict, "iterkeys"):

+            return self.__tokdict.iterkeys()

+        else:

+            return iter(self.__tokdict)

+

+    def _itervalues( self ):

+        return (self[k] for k in self._iterkeys())

+            

+    def _iteritems( self ):

+        return ((k, self[k]) for k in self._iterkeys())

+

+    if PY_3:

+        keys = _iterkeys       

+        """Returns an iterator of all named result keys (Python 3.x only)."""

+

+        values = _itervalues

+        """Returns an iterator of all named result values (Python 3.x only)."""

+

+        items = _iteritems

+        """Returns an iterator of all named result key-value tuples (Python 3.x only)."""

+

+    else:

+        iterkeys = _iterkeys

+        """Returns an iterator of all named result keys (Python 2.x only)."""

+

+        itervalues = _itervalues

+        """Returns an iterator of all named result values (Python 2.x only)."""

+

+        iteritems = _iteritems

+        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""

+

+        def keys( self ):

+            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""

+            return list(self.iterkeys())

+

+        def values( self ):

+            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""

+            return list(self.itervalues())

+                

+        def items( self ):

+            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""

+            return list(self.iteritems())

+

+    def haskeys( self ):

+        """Since keys() returns an iterator, this method is helpful in bypassing

+           code that looks for the existence of any defined results names."""

+        # True iff any results names are defined (non-empty token dict).
+        return bool(self.__tokdict)

+        

+    def pop( self, *args, **kwargs):

+        """

+        Removes and returns item at specified index (default=C{last}).

+        Supports both C{list} and C{dict} semantics for C{pop()}. If passed no

+        argument or an integer argument, it will use C{list} semantics

+        and pop tokens from the list of parsed tokens. If passed a 

+        non-integer argument (most likely a string), it will use C{dict}

+        semantics and pop the corresponding value from any defined 

+        results names. A second default return value argument is 

+        supported, just as in C{dict.pop()}.

+

+        Example::

+            def remove_first(tokens):

+                tokens.pop(0)

+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']

+            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']

+

+            label = Word(alphas)

+            patt = label("LABEL") + OneOrMore(Word(nums))

+            print(patt.parseString("AAB 123 321").dump())

+

+            # Use pop() in a parse action to remove named result (note that corresponding value is not

+            # removed from list form of results)

+            def remove_LABEL(tokens):

+                tokens.pop("LABEL")

+                return tokens

+            patt.addParseAction(remove_LABEL)

+            print(patt.parseString("AAB 123 321").dump())

+        prints::

+            ['AAB', '123', '321']

+            - LABEL: AAB

+

+            ['AAB', '123', '321']

+        """

+        # No positional args -> list semantics: pop the last token.
+        if not args:

+            args = [-1]

+        # Only the 'default' keyword is honored, mirroring dict.pop().
+        for k,v in kwargs.items():

+            if k == 'default':

+                args = (args[0], v)

+            else:

+                raise TypeError("pop() got an unexpected keyword argument '%s'" % k)

+        # An int index, a lone argument, or a present key -> remove and return it.
+        if (isinstance(args[0], int) or 

+                        len(args) == 1 or 

+                        args[0] in self):

+            index = args[0]

+            ret = self[index]

+            del self[index]

+            return ret

+        else:

+            # Key absent and a default was supplied -> return the default untouched.
+            defaultvalue = args[1]

+            return defaultvalue

+

+    def get(self, key, defaultValue=None):

+        """

+        Returns named result matching the given key, or if there is no

+        such name, then returns the given C{defaultValue} or C{None} if no

+        C{defaultValue} is specified.

+

+        Similar to C{dict.get()}.

+        

+        Example::

+            integer = Word(nums)

+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

+

+            result = date_str.parseString("1999/12/31")

+            print(result.get("year")) # -> '1999'

+            print(result.get("hour", "not specified")) # -> 'not specified'

+            print(result.get("hour")) # -> None

+        """

+        # Membership test delegates to __contains__; missing keys yield the default.
+        if key in self:

+            return self[key]

+        else:

+            return defaultValue

+

+    def insert( self, index, insStr ):

+        """

+        Inserts new element at location index in the list of parsed tokens.

+        

+        Similar to C{list.insert()}.

+

+        Example::

+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']

+

+            # use a parse action to insert the parse location in the front of the parsed results

+            def insert_locn(locn, tokens):

+                tokens.insert(0, locn)

+            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']

+        """

+        self.__toklist.insert(index, insStr)

+        # fixup indices in token dictionary

+        # NOTE(review): the comparison is strict -- entries recorded exactly at
+        # 'index' are NOT shifted, unlike list.insert() which shifts elements
+        # at positions >= index. Confirm this asymmetry is intended.
+        for name,occurrences in self.__tokdict.items():

+            for k, (value, position) in enumerate(occurrences):

+                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))

+

+    def append( self, item ):

+        """

+        Add single element to end of ParseResults list of elements.

+

+        Example::

+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']

+            

+            # use a parse action to compute the sum of the parsed integers, and add it to the end

+            def append_sum(tokens):

+                tokens.append(sum(map(int, tokens)))

+            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]

+        """

+        # List-style append; named results are unaffected.
+        self.__toklist.append(item)

+

+    def extend( self, itemseq ):

+        """

+        Add sequence of elements to end of ParseResults list of elements.

+

+        Example::

+            patt = OneOrMore(Word(alphas))

+            

+            # use a parse action to append the reverse of the matched strings, to make a palindrome

+            def make_palindrome(tokens):

+                tokens.extend(reversed([t[::-1] for t in tokens]))

+                return ''.join(tokens)

+            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'

+        """

+        # Another ParseResults is merged via += so its named results are preserved;
+        # plain sequences only extend the token list.
+        if isinstance(itemseq, ParseResults):

+            self += itemseq

+        else:

+            self.__toklist.extend(itemseq)

+

+    def clear( self ):

+        """

+        Clear all elements and results names.

+        """

+        # Drop tokens in place and forget all named results.
+        del self.__toklist[:]

+        self.__tokdict.clear()

+

+    def __getattr__( self, name ):

+        # Missing results names resolve to "" rather than raising AttributeError.
+        try:

+            return self[name]

+        except KeyError:

+            return ""

+            

+        # NOTE(review): everything below is unreachable -- both branches of the
+        # try/except above return. Dead code left over from an older lookup path.
+        if name in self.__tokdict:

+            if name not in self.__accumNames:

+                return self.__tokdict[name][-1][0]

+            else:

+                return ParseResults([ v[0] for v in self.__tokdict[name] ])

+        else:

+            return ""

+

+    def __add__( self, other ):

+        # Non-mutating concatenation: copy self, then reuse the __iadd__ merge logic.
+        ret = self.copy()

+        ret += other

+        return ret

+

+    def __iadd__( self, other ):

+        # Merge other's tokens and re-home its named results at shifted positions.
+        if other.__tokdict:

+            offset = len(self.__toklist)

+            # Negative stored positions are sentinels; map them to the join offset.
+            addoffset = lambda a: offset if a<0 else a+offset

+            otheritems = other.__tokdict.items()

+            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )

+                                for (k,vlist) in otheritems for v in vlist]

+            for k,v in otherdictitems:

+                self[k] = v

+                # Adopted sub-results now point at us as their (weakly referenced) parent.
+                if isinstance(v[0],ParseResults):

+                    v[0].__parent = wkref(self)

+            

+        self.__toklist += other.__toklist

+        self.__accumNames.update( other.__accumNames )

+        return self

+

+    def __radd__(self, other):

+        # Right-addition; only int 0 is special-cased (sum()'s default start value).
+        if isinstance(other,int) and other == 0:

+            # useful for merging many ParseResults using sum() builtin

+            return self.copy()

+        else:

+            # this may raise a TypeError - so be it

+            return other + self

+        

+    def __repr__( self ):

+        # Debug form: (token list, results-name dict).
+        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )

+

+    def __str__( self ):

+        # Bracketed display: nested ParseResults rendered via _ustr, scalars via repr.
+        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'

+

+    def _asStringList( self, sep='' ):

+        # Depth-first flatten of all tokens into a flat list of strings;
+        # 'sep' is inserted between items when non-empty.
+        out = []

+        for item in self.__toklist:

+            if out and sep:

+                out.append(sep)

+            if isinstance( item, ParseResults ):

+                out += item._asStringList()

+            else:

+                out.append( _ustr(item) )

+        return out

+

+    def asList( self ):

+        """

+        Returns the parse results as a nested list of matching tokens, all converted to strings.

+

+        Example::

+            patt = OneOrMore(Word(alphas))

+            result = patt.parseString("sldkj lsdkj sldkj")

+            # even though the result prints in string-like form, it is actually a pyparsing ParseResults

+            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']

+            

+            # Use asList() to create an actual list

+            result_list = result.asList()

+            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']

+        """

+        # Recurse into nested ParseResults; scalar tokens pass through unchanged.
+        return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]

+

+    def asDict( self ):

+        """

+        Returns the named parse results as a nested dictionary.

+

+        Example::

+            integer = Word(nums)

+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

+            

+            result = date_str.parseString('12/31/1999')

+            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})

+            

+            result_dict = result.asDict()

+            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}

+

+            # even though a ParseResults supports dict-like access, sometime you just need to have a dict

+            import json

+            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable

+            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}

+        """

+        # Pick the iterator-style items accessor appropriate to the running Python.
+        if PY_3:

+            item_fn = self.items

+        else:

+            item_fn = self.iteritems

+            

+        # Convert values depth-first: keyed sub-results become dicts, unkeyed ones lists.
+        def toItem(obj):

+            if isinstance(obj, ParseResults):

+                if obj.haskeys():

+                    return obj.asDict()

+                else:

+                    return [toItem(v) for v in obj]

+            else:

+                return obj

+                

+        return dict((k,toItem(v)) for k,v in item_fn())

+

+    def copy( self ):

+        """

+        Returns a new copy of a C{ParseResults} object.

+        """

+        # NOTE: token-list handling depends on ParseResults.__init__ (not shown here);
+        # the name dict is explicitly copied so name edits don't alias the original.
+        ret = ParseResults( self.__toklist )

+        ret.__tokdict = self.__tokdict.copy()

+        ret.__parent = self.__parent

+        ret.__accumNames.update( self.__accumNames )

+        ret.__name = self.__name

+        return ret

+

+    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):

+        """

+        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.

+        """

+        nl = "\n"

+        out = []

+        # Map token position -> results name, for position-based tagging below.
+        namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()

+                                                            for v in vlist)

+        nextLevelIndent = indent + "  "

+

+        # collapse out indents if formatting is not desired

+        if not formatted:

+            indent = ""

+            nextLevelIndent = ""

+            nl = ""

+

+        # Choose this node's tag: explicit doctag, else our results name, else "ITEM".
+        selfTag = None

+        if doctag is not None:

+            selfTag = doctag

+        else:

+            if self.__name:

+                selfTag = self.__name

+

+        if not selfTag:

+            if namedItemsOnly:

+                return ""

+            else:

+                selfTag = "ITEM"

+

+        out += [ nl, indent, "<", selfTag, ">" ]

+

+        for i,res in enumerate(self.__toklist):

+            if isinstance(res,ParseResults):

+                if i in namedItems:

+                    out += [ res.asXML(namedItems[i],

+                                        namedItemsOnly and doctag is None,

+                                        nextLevelIndent,

+                                        formatted)]

+                else:

+                    out += [ res.asXML(None,

+                                        namedItemsOnly and doctag is None,

+                                        nextLevelIndent,

+                                        formatted)]

+            else:

+                # individual token, see if there is a name for it

+                resTag = None

+                if i in namedItems:

+                    resTag = namedItems[i]

+                if not resTag:

+                    if namedItemsOnly:

+                        continue

+                    else:

+                        resTag = "ITEM"

+                xmlBodyText = _xml_escape(_ustr(res))

+                out += [ nl, nextLevelIndent, "<", resTag, ">",

+                                                xmlBodyText,

+                                                "</", resTag, ">" ]

+

+        out += [ nl, indent, "</", selfTag, ">" ]

+        return "".join(out)

+

+    def __lookup(self,sub):

+        # Reverse lookup: find the results name bound to this exact sub-result
+        # (matched by identity, not equality); None if it has no name.
+        for k,vlist in self.__tokdict.items():

+            for v,loc in vlist:

+                if sub is v:

+                    return k

+        return None

+

+    def getName(self):

+        r"""

+        Returns the results name for this token expression. Useful when several 

+        different expressions might match at a particular location.

+

+        Example::

+            integer = Word(nums)

+            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")

+            house_number_expr = Suppress('#') + Word(nums, alphanums)

+            user_data = (Group(house_number_expr)("house_number") 

+                        | Group(ssn_expr)("ssn")

+                        | Group(integer)("age"))

+            user_info = OneOrMore(user_data)

+            

+            result = user_info.parseString("22 111-22-3333 #221B")

+            for item in result:

+                print(item.getName(), ':', item[0])

+        prints::

+            age : 22

+            ssn : 111-22-3333

+            house_number : 221B

+        """

+        if self.__name:

+            return self.__name

+        elif self.__parent:

+            # Ask the (weakly referenced) parent what name it filed us under.
+            par = self.__parent()

+            if par:

+                return par.__lookup(self)

+            else:

+                return None

+        # Lone token whose single name is recorded at position 0 or -1: report that name.
+        elif (len(self) == 1 and

+               len(self.__tokdict) == 1 and

+               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):

+            return next(iter(self.__tokdict.keys()))

+        else:

+            return None

+

+    def dump(self, indent='', depth=0, full=True):

+        """

+        Diagnostic method for listing out the contents of a C{ParseResults}.

+        Accepts an optional C{indent} argument so that this string can be embedded

+        in a nested display of other data.

+

+        Example::

+            integer = Word(nums)

+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

+            

+            result = date_str.parseString('12/31/1999')

+            print(result.dump())

+        prints::

+            ['12', '/', '31', '/', '1999']

+            - day: 1999

+            - month: 31

+            - year: 12

+        """

+        out = []

+        NL = '\n'

+        out.append( indent+_ustr(self.asList()) )

+        if full:

+            # Named results render as "- key: value" lines under the list form.
+            if self.haskeys():

+                items = sorted((str(k), v) for k,v in self.items())

+                for k,v in items:

+                    if out:

+                        out.append(NL)

+                    out.append( "%s%s- %s: " % (indent,('  '*depth), k) )

+                    if isinstance(v,ParseResults):

+                        if v:

+                            out.append( v.dump(indent,depth+1) )

+                        else:

+                            out.append(_ustr(v))

+                    else:

+                        out.append(repr(v))

+            # Otherwise show nested sub-results indexed by list position.
+            elif any(isinstance(vv,ParseResults) for vv in self):

+                v = self

+                for i,vv in enumerate(v):

+                    if isinstance(vv,ParseResults):

+                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),vv.dump(indent,depth+1) ))

+                    else:

+                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),_ustr(vv)))

+            

+        return "".join(out)

+

+    def pprint(self, *args, **kwargs):

+        """

+        Pretty-printer for parsed results as a list, using the C{pprint} module.

+        Accepts additional positional or keyword args as defined for the 

+        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})

+

+        Example::

+            ident = Word(alphas, alphanums)

+            num = Word(nums)

+            func = Forward()

+            term = ident | num | Group('(' + func + ')')

+            func <<= ident + Group(Optional(delimitedList(term)))

+            result = func.parseString("fna a,b,(fnb c,d,200),100")

+            result.pprint(width=40)

+        prints::

+            ['fna',

+             ['a',

+              'b',

+              ['(', 'fnb', ['c', 'd', '200'], ')'],

+              '100']]

+        """

+        # Delegates to pprint on the plain nested-list form of the results.
+        pprint.pprint(self.asList(), *args, **kwargs)

+

+    # add support for pickle protocol

+    def __getstate__(self):

+        # Pickle state: token list plus (name dict copy, strong parent ref or
+        # None -- weakrefs are not picklable -- accumulated names, results name).
+        return ( self.__toklist,

+                 ( self.__tokdict.copy(),

+                   self.__parent is not None and self.__parent() or None,

+                   self.__accumNames,

+                   self.__name ) )

+

+    def __setstate__(self,state):

+        self.__toklist = state[0]

+        (self.__tokdict,

+         par,

+         inAccumNames,

+         self.__name) = state[1]

+        self.__accumNames = {}

+        self.__accumNames.update(inAccumNames)

+        # Restore the parent link as a weakref (__getstate__ stored a strong ref or None).
+        if par is not None:

+            self.__parent = wkref(par)

+        else:

+            self.__parent = None

+

+    def __getnewargs__(self):

+        # Arguments replayed to __new__ on unpickling.
+        # NOTE(review): relies on __asList/__modal being set by __new__/__init__
+        # (not visible in this hunk) -- confirm they exist on all instances.
+        return self.__toklist, self.__name, self.__asList, self.__modal

+

+    def __dir__(self):

+        # Expose results names alongside normal attributes (helps introspection/completion).
+        return (dir(type(self)) + list(self.keys()))

+

+collections.MutableMapping.register(ParseResults)

+

+def col (loc,strg):

+    """Returns current column within a string, counting newlines as line separators.

+   The first column is number 1.

+

+   Note: the default parsing behavior is to expand tabs in the input string

+   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information

+   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a

+   consistent view of the parsed string, the parse location, and line and column

+   positions within the parsed string.

+   """

+    s = strg

+    # Column 1 immediately after a newline; otherwise distance from the last
+    # newline (rfind returns -1 when none precedes loc, yielding loc+1, 1-based).
+    return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)

+

+def lineno(loc,strg):

+    """Returns current line number within a string, counting newlines as line separators.

+   The first line is number 1.

+

+   Note: the default parsing behavior is to expand tabs in the input string

+   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information

+   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a

+   consistent view of the parsed string, the parse location, and line and column

+   positions within the parsed string.

+   """

+    # Number of newlines before loc, plus one (1-based line numbering).
+    return strg.count("\n",0,loc) + 1

+

+def line( loc, strg ):

+    """Returns the line of text containing loc within a string, counting newlines as line separators.

+       """

+    # Bracket loc between the preceding and following newlines (lastCR is -1
+    # on the first line, so the slice starts at 0).
+    lastCR = strg.rfind("\n", 0, loc)

+    nextCR = strg.find("\n", loc)

+    if nextCR >= 0:

+        return strg[lastCR+1:nextCR]

+    else:

+        # loc is on the final line: take everything to end-of-string.
+        return strg[lastCR+1:]

+

+# Default debug hook invoked before an expression attempts a match.
+def _defaultStartDebugAction( instring, loc, expr ):

+    print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))

+

+# Default debug hook invoked after an expression matches successfully.
+def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):

+    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))

+

+# Default debug hook invoked when an expression raises a parse exception.
+def _defaultExceptionDebugAction( instring, loc, expr, exc ):

+    print ("Exception raised:" + _ustr(exc))

+

+def nullDebugAction(*args):

+    """'Do-nothing' debug action, to suppress debugging output during parsing."""

+    # Accepts any signature so it can stand in for any of the three debug hooks.
+    pass

+

+# Only works on Python 3.x - nonlocal is toxic to Python 2 installs

+#~ 'decorator to trim function calls to match the arity of the target'

+#~ def _trim_arity(func, maxargs=3):

+    #~ if func in singleArgBuiltins:

+        #~ return lambda s,l,t: func(t)

+    #~ limit = 0

+    #~ foundArity = False

+    #~ def wrapper(*args):

+        #~ nonlocal limit,foundArity

+        #~ while 1:

+            #~ try:

+                #~ ret = func(*args[limit:])

+                #~ foundArity = True

+                #~ return ret

+            #~ except TypeError:

+                #~ if limit == maxargs or foundArity:

+                    #~ raise

+                #~ limit += 1

+                #~ continue

+    #~ return wrapper

+

+# this version is Python 2.x-3.x cross-compatible

+'decorator to trim function calls to match the arity of the target'

+def _trim_arity(func, maxargs=2):

+    if func in singleArgBuiltins:

+        return lambda s,l,t: func(t)

+    # One-element lists emulate Py3 'nonlocal' so wrapper can mutate them on Py2.
+    limit = [0]

+    foundArity = [False]

+    

+    # traceback return data structure changed in Py3.5 - normalize back to plain tuples

+    if system_version[:2] >= (3,5):

+        def extract_stack(limit=0):

+            # special handling for Python 3.5.0 - extra deep call stack by 1

+            offset = -3 if system_version == (3,5,0) else -2

+            frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]

+            return [(frame_summary.filename, frame_summary.lineno)]

+        def extract_tb(tb, limit=0):

+            frames = traceback.extract_tb(tb, limit=limit)

+            frame_summary = frames[-1]

+            return [(frame_summary.filename, frame_summary.lineno)]

+    else:

+        extract_stack = traceback.extract_stack

+        extract_tb = traceback.extract_tb

+    

+    # synthesize what would be returned by traceback.extract_stack at the call to 

+    # user's parse action 'func', so that we don't incur call penalty at parse time

+    

+    LINE_DIFF = 6

+    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND 

+    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!

+    this_line = extract_stack(limit=2)[-1]

+    pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)

+

+    def wrapper(*args):

+        while 1:

+            try:

+                ret = func(*args[limit[0]:])

+                foundArity[0] = True

+                return ret

+            except TypeError:

+                # re-raise TypeErrors if they did not come from our arity testing

+                if foundArity[0]:

+                    raise

+                else:

+                    try:

+                        tb = sys.exc_info()[-1]

+                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:

+                            raise

+                    finally:

+                        del tb

+

+                # TypeError came from our own call line: retry with one more
+                # leading (s, l, t) argument trimmed, up to maxargs attempts.
+                if limit[0] <= maxargs:

+                    limit[0] += 1

+                    continue

+                raise

+

+    # copy func name to wrapper for sensible debug output

+    func_name = "<parse action>"

+    try:

+        func_name = getattr(func, '__name__', 

+                            getattr(func, '__class__').__name__)

+    except Exception:

+        func_name = str(func)

+    wrapper.__name__ = func_name

+

+    return wrapper

+

+class ParserElement(object):

+    """Abstract base level parser element class."""

+    # Characters skipped between tokens by default; see setDefaultWhitespaceChars.
+    DEFAULT_WHITE_CHARS = " \n\t\r"

+    # NOTE(review): presumably toggles full vs. trimmed stack traces on parse
+    # errors -- its consumer is outside this hunk, confirm at usage sites.
+    verbose_stacktrace = False

+

+    @staticmethod

+    def setDefaultWhitespaceChars( chars ):

+        r"""

+        Overrides the default whitespace chars

+

+        Example::

+            # default whitespace chars are space, <TAB> and newline

+            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']

+            

+            # change to just treat newline as significant

+            ParserElement.setDefaultWhitespaceChars(" \t")

+            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']

+        """

+        # Class-wide setting, read by __init__ and re-synced by copy().
+        ParserElement.DEFAULT_WHITE_CHARS = chars

+

+    @staticmethod

+    def inlineLiteralsUsing(cls):

+        """

+        Set class to be used for inclusion of string literals into a parser.

+        

+        Example::

+            # default literal class used is Literal

+            integer = Word(nums)

+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

+

+            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']

+

+

+            # change to Suppress

+            ParserElement.inlineLiteralsUsing(Suppress)

+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           

+

+            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']

+        """

+        # Class-level hook consumed where bare strings are promoted to parser
+        # elements (usage sites are outside this hunk).
+        ParserElement._literalStringClass = cls

+

+    def __init__( self, savelist=False ):

+        """Initialize per-expression state with the default match/debug settings."""
+        # Parse actions added via setParseAction/addParseAction.
+        self.parseAction = list()

+        self.failAction = None

+        #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall

+        # Cached string representation; None until first computed.
+        self.strRepr = None

+        self.resultsName = None

+        self.saveAsList = savelist

+        self.skipWhitespace = True

+        # Snapshot the class-wide default; copy() re-syncs while copyDefaultWhiteChars holds.
+        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS

+        self.copyDefaultWhiteChars = True

+        self.mayReturnEmpty = False # used when checking for left-recursion

+        self.keepTabs = False

+        self.ignoreExprs = list()

+        self.debug = False

+        self.streamlined = False

+        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index

+        self.errmsg = ""

+        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)

+        self.debugActions = ( None, None, None ) #custom debug actions

+        self.re = None

+        self.callPreparse = True # used to avoid redundant calls to preParse

+        self.callDuringTry = False

+

+    def copy( self ):

+        """

+        Make a copy of this C{ParserElement}.  Useful for defining different parse actions

+        for the same parsing pattern, using copies of the original parse element.

+        

+        Example::

+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))

+            integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")

+            integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")

+            

+            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))

+        prints::

+            [5120, 100, 655360, 268435456]

+        Equivalent form of C{expr.copy()} is just C{expr()}::

+            integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")

+        """

+        cpy = copy.copy( self )

+        # Duplicate the mutable lists so edits to the copy don't affect the original.
+        cpy.parseAction = self.parseAction[:]

+        cpy.ignoreExprs = self.ignoreExprs[:]

+        # Re-sync with the (possibly since-changed) class-wide default whitespace.
+        if self.copyDefaultWhiteChars:

+            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS

+        return cpy

+

+    def setName( self, name ):

+        """

+        Define name for this expression, makes debugging and exception messages clearer.

+        

+        Example::

+            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)

+            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)

+        """

+        self.name = name

+        self.errmsg = "Expected " + self.name

+        # Keep any pre-built exception's message in sync with the new name.
+        if hasattr(self,"exception"):

+            self.exception.msg = self.errmsg

+        return self

+

+    def setResultsName( self, name, listAllMatches=False ):

+        """

+        Define name for referencing matching tokens as a nested attribute

+        of the returned parse results.

+        NOTE: this returns a *copy* of the original C{ParserElement} object;

+        this is so that the client can define a basic element, such as an

+        integer, and reference it in multiple places with different names.

+

+        You can also set results names using the abbreviated syntax,

+        C{expr("name")} in place of C{expr.setResultsName("name")} - 

+        see L{I{__call__}<__call__>}.

+

+        Example::

+            date_str = (integer.setResultsName("year") + '/' 

+                        + integer.setResultsName("month") + '/' 

+                        + integer.setResultsName("day"))

+

+            # equivalent form:

+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

+        """

+        newself = self.copy()

+        # A trailing '*' on the name is shorthand for listAllMatches=True.
+        if name.endswith("*"):

+            name = name[:-1]

+            listAllMatches=True

+        newself.resultsName = name

+        newself.modalResults = not listAllMatches

+        return newself

+

+    def setBreak(self,breakFlag = True):

+        """Method to invoke the Python pdb debugger when this element is

+           about to be parsed. Set C{breakFlag} to True to enable, False to

+           disable.

+        """

+        if breakFlag:

+            # Wrap _parse so pdb stops just before this element parses.
+            _parseMethod = self._parse

+            def breaker(instring, loc, doActions=True, callPreParse=True):

+                import pdb

+                pdb.set_trace()

+                return _parseMethod( instring, loc, doActions, callPreParse )

+            # Stash the original so a later setBreak(False) can restore it.
+            breaker._originalParseMethod = _parseMethod

+            self._parse = breaker

+        else:

+            if hasattr(self._parse,"_originalParseMethod"):

+                self._parse = self._parse._originalParseMethod

+        return self

+

+    def setParseAction( self, *fns, **kwargs ):

+        """

+        Define action to perform when successfully matching parse element definition.

+        Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},

+        C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:

+         - s   = the original string being parsed (see note below)

+         - loc = the location of the matching substring

+         - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object

+        If the functions in fns modify the tokens, they can return them as the return

+        value from fn, and the modified list of tokens will replace the original.

+        Otherwise, fn does not need to return any value.

+

+        Optional keyword arguments:

+         - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing

+

+        Note: the default parsing behavior is to expand tabs in the input string

+        before starting the parsing process.  See L{I{parseString}<parseString>} for more information

+        on parsing strings containing C{<TAB>}s, and suggested methods to maintain a

+        consistent view of the parsed string, the parse location, and line and column

+        positions within the parsed string.

+        

+        Example::

+            integer = Word(nums)

+            date_str = integer + '/' + integer + '/' + integer

+

+            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']

+

+            # use parse action to convert to ints at parse time

+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))

+            date_str = integer + '/' + integer + '/' + integer

+

+            # note that integer fields are now ints, not strings

+            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]

+        """

+        # _trim_arity adapts each fn to whichever of the 0-3 argument signatures it accepts.
+        # Replaces (does not extend) any previously set actions.
+        self.parseAction = list(map(_trim_arity, list(fns)))

+        self.callDuringTry = kwargs.get("callDuringTry", False)

+        return self

+

+    def addParseAction( self, *fns, **kwargs ):

+        """

+        Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.

+        

+        See examples in L{I{copy}<copy>}.

+        """

+        self.parseAction += list(map(_trim_arity, list(fns)))

+        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)

+        return self

+

+    def addCondition(self, *fns, **kwargs):

+        """Add a boolean predicate function to expression's list of parse actions. See 

+        L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, 

+        functions passed to C{addCondition} need to return boolean success/fail of the condition.

+

+        Optional keyword arguments:

+         - message = define a custom message to be used in the raised exception

+         - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException

+         

+        Example::

+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))

+            year_int = integer.copy()

+            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")

+            date_str = year_int + '/' + integer + '/' + integer

+

+            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)

+        """

+        msg = kwargs.get("message", "failed user-defined condition")

+        exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException

+        for fn in fns:

+            def pa(s,l,t):

+                if not bool(_trim_arity(fn)(s,l,t)):

+                    raise exc_type(s,l,msg)

+            self.parseAction.append(pa)

+        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)

+        return self

+

+    def setFailAction( self, fn ):

+        """Define action to perform if parsing fails at this expression.

+           Fail acton fn is a callable function that takes the arguments

+           C{fn(s,loc,expr,err)} where:

+            - s = string being parsed

+            - loc = location where expression match was attempted and failed

+            - expr = the parse expression that failed

+            - err = the exception thrown

+           The function returns no value.  It may throw C{L{ParseFatalException}}

+           if it is desired to stop parsing immediately."""

+        self.failAction = fn

+        return self

+

+    def _skipIgnorables( self, instring, loc ):

+        exprsFound = True

+        while exprsFound:

+            exprsFound = False

+            for e in self.ignoreExprs:

+                try:

+                    while 1:

+                        loc,dummy = e._parse( instring, loc )

+                        exprsFound = True

+                except ParseException:

+                    pass

+        return loc

+

+    def preParse( self, instring, loc ):

+        if self.ignoreExprs:

+            loc = self._skipIgnorables( instring, loc )

+

+        if self.skipWhitespace:

+            wt = self.whiteChars

+            instrlen = len(instring)

+            while loc < instrlen and instring[loc] in wt:

+                loc += 1

+

+        return loc

+

+    def parseImpl( self, instring, loc, doActions=True ):

+        return loc, []

+

+    def postParse( self, instring, loc, tokenlist ):

+        return tokenlist

+

    #~ @profile
    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
        """Uncached core match driver: preParse -> parseImpl -> postParse ->
        parse actions.  Returns ``(new loc, ParseResults)`` on success; raises a
        ParseBaseException subclass on failure.  When ``self.debug`` or a fail
        action is set, the same sequence runs wrapped with the registered
        debug/fail hooks.
        """
        debugging = ( self.debug ) #and doActions )

        if debugging or self.failAction:
            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
            # debugActions[0]: "match attempted" hook
            if (self.debugActions[0] ):
                self.debugActions[0]( instring, loc, self )
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            try:
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    # ran off the end of instring: report as an ordinary parse failure
                    raise ParseException( instring, len(instring), self.errmsg, self )
            except ParseBaseException as err:
                #~ print ("Exception raised:", err)
                # debugActions[2]: "match failed" hook, then any user fail action
                if self.debugActions[2]:
                    self.debugActions[2]( instring, tokensStart, self, err )
                if self.failAction:
                    self.failAction( instring, tokensStart, self, err )
                raise
        else:
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            # only pay for the IndexError guard when the element may index past
            # the end of the string (or we are already at/after end of input)
            if self.mayIndexError or loc >= len(instring):
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    raise ParseException( instring, len(instring), self.errmsg, self )
            else:
                loc,tokens = self.parseImpl( instring, preloc, doActions )

        tokens = self.postParse( instring, loc, tokens )

        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
        if self.parseAction and (doActions or self.callDuringTry):
            if debugging:
                try:
                    for fn in self.parseAction:
                        tokens = fn( instring, tokensStart, retTokens )
                        if tokens is not None:
                            # action returned replacement tokens; rewrap them
                            retTokens = ParseResults( tokens,
                                                      self.resultsName,
                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                      modal=self.modalResults )
                except ParseBaseException as err:
                    #~ print "Exception raised in user parse action:", err
                    if (self.debugActions[2] ):
                        self.debugActions[2]( instring, tokensStart, self, err )
                    raise
            else:
                for fn in self.parseAction:
                    tokens = fn( instring, tokensStart, retTokens )
                    if tokens is not None:
                        retTokens = ParseResults( tokens,
                                                  self.resultsName,
                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                  modal=self.modalResults )

        if debugging:
            #~ print ("Matched",self,"->",retTokens.asList())
            # debugActions[1]: "match succeeded" hook
            if (self.debugActions[1] ):
                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )

        return loc, retTokens

+

+    def tryParse( self, instring, loc ):

+        try:

+            return self._parse( instring, loc, doActions=False )[0]

+        except ParseFatalException:

+            raise ParseException( instring, loc, self.errmsg, self)

+    

+    def canParseNext(self, instring, loc):

+        try:

+            self.tryParse(instring, loc)

+        except (ParseException, IndexError):

+            return False

+        else:

+            return True

+

    class _UnboundedCache(object):
        """Packrat cache with no size limit.

        The backing dict lives in the closure shared by the accessor functions,
        which are then bound onto the instance with ``types.MethodType``.
        """
        def __init__(self):
            cache = {}
            # sentinel returned by get() for missing keys (None could be a real value)
            self.not_in_cache = not_in_cache = object()

            def get(self, key):
                return cache.get(key, not_in_cache)

            def set(self, key, value):
                cache[key] = value

            def clear(self):
                cache.clear()

            self.get = types.MethodType(get, self)
            self.set = types.MethodType(set, self)
            self.clear = types.MethodType(clear, self)

+

    # Bounded FIFO packrat cache: use an OrderedDict-backed version when
    # available, otherwise a plain dict paired with a bounded deque of keys
    # that records insertion order for eviction.
    if _OrderedDict is not None:
        class _FifoCache(object):
            """Bounded packrat cache; evicts the oldest entry once ``size`` is exceeded."""
            def __init__(self, size):
                # sentinel returned by get() for missing keys
                self.not_in_cache = not_in_cache = object()

                cache = _OrderedDict()

                def get(self, key):
                    return cache.get(key, not_in_cache)

                def set(self, key, value):
                    cache[key] = value
                    if len(cache) > size:
                        # popitem(False) removes the oldest (first-inserted) entry
                        cache.popitem(False)

                def clear(self):
                    cache.clear()

                self.get = types.MethodType(get, self)
                self.set = types.MethodType(set, self)
                self.clear = types.MethodType(clear, self)

    else:
        class _FifoCache(object):
            """Bounded packrat cache for Pythons without OrderedDict."""
            def __init__(self, size):
                self.not_in_cache = not_in_cache = object()

                cache = {}
                key_fifo = collections.deque([], size)

                def get(self, key):
                    return cache.get(key, not_in_cache)

                def set(self, key, value):
                    cache[key] = value
                    if len(cache) > size:
                        # evict the oldest key; the deque itself is bounded to `size`
                        cache.pop(key_fifo.popleft(), None)
                    key_fifo.append(key)

                def clear(self):
                    cache.clear()
                    key_fifo.clear()

                self.get = types.MethodType(get, self)
                self.set = types.MethodType(set, self)
                self.clear = types.MethodType(clear, self)

+

    # argument cache for optimizing repeated calls when backtracking through recursive expressions
    packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail
    packrat_cache_lock = RLock()
    # [hits, misses] counters, updated inside _parseCache
    packrat_cache_stats = [0, 0]

+

    # this method gets repeatedly called during backtracking with the same arguments -
    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
        """Packrat (memoizing) wrapper around C{_parseNoCache}.

        Successes are cached as ``(loc, copy of results)``; failures are cached
        as exception instances and re-raised on a cache hit.  All cache access
        is serialized via ``packrat_cache_lock``.
        """
        HIT, MISS = 0, 1
        lookup = (self, instring, loc, callPreParse, doActions)
        with ParserElement.packrat_cache_lock:
            cache = ParserElement.packrat_cache
            value = cache.get(lookup)
            if value is cache.not_in_cache:
                ParserElement.packrat_cache_stats[MISS] += 1
                try:
                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
                except ParseBaseException as pe:
                    # cache a copy of the exception, without the traceback
                    cache.set(lookup, pe.__class__(*pe.args))
                    raise
                else:
                    # copy the results so later mutation by callers can't corrupt the cache
                    cache.set(lookup, (value[0], value[1].copy()))
                    return value
            else:
                ParserElement.packrat_cache_stats[HIT] += 1
                if isinstance(value, Exception):
                    raise value
                return (value[0], value[1].copy())

+

    # default entry point is the uncached parser; enablePackrat() swaps in _parseCache
    _parse = _parseNoCache

+

+    @staticmethod

+    def resetCache():

+        ParserElement.packrat_cache.clear()

+        ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)

+

    # guards against installing the packrat wrapper more than once
    _packratEnabled = False
    @staticmethod
    def enablePackrat(cache_size_limit=128):
        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
           Repeated parse attempts at the same string location (which happens
           often in many complex grammars) can immediately return a cached value,
           instead of re-executing parsing/validating code.  Memoizing is done of
           both valid results and parsing exceptions.
           
           Parameters:
            - cache_size_limit - (default=C{128}) - if an integer value is provided
              will limit the size of the packrat cache; if None is passed, then
              the cache size will be unbounded; if 0 is passed, the cache will
              be effectively disabled.
            
           This speedup may break existing programs that use parse actions that
           have side-effects.  For this reason, packrat parsing is disabled when
           you first import pyparsing.  To activate the packrat feature, your
           program must call the class method C{ParserElement.enablePackrat()}.  If
           your program uses C{psyco} to "compile as you go", you must call
           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
           Python will crash.  For best results, call C{enablePackrat()} immediately
           after importing pyparsing.
           
           Example::
               import pyparsing
               pyparsing.ParserElement.enablePackrat()
        """
        if not ParserElement._packratEnabled:
            ParserElement._packratEnabled = True
            if cache_size_limit is None:
                ParserElement.packrat_cache = ParserElement._UnboundedCache()
            else:
                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
            # swap the class-level parse entry point to the memoizing version
            ParserElement._parse = ParserElement._parseCache

+

    def parseString( self, instring, parseAll=False ):
        """
        Execute the parse expression with the given string.
        This is the main interface to the client code, once the complete
        expression has been built.

        If you want the grammar to require that the entire input string be
        successfully parsed, then set C{parseAll} to True (equivalent to ending
        the grammar with C{L{StringEnd()}}).

        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
        in order to report proper column numbers in parse actions.
        If the input string contains tabs and
        the grammar uses parse actions that use the C{loc} argument to index into the
        string being parsed, you can ensure you have a consistent view of the input
        string by:
         - calling C{parseWithTabs} on your grammar before calling C{parseString}
           (see L{I{parseWithTabs}<parseWithTabs>})
         - define your parse action using the full C{(s,loc,toks)} signature, and
           reference the input string using the parse action's C{s} argument
         - explictly expand the tabs in your input string before calling
           C{parseString}
        
        Example::
            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
        """
        ParserElement.resetCache()
        if not self.streamlined:
            self.streamline()
            #~ self.saveAsList = True
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            instring = instring.expandtabs()
        try:
            loc, tokens = self._parse( instring, 0 )
            if parseAll:
                # for parseAll, require that only ignorables/whitespace remain,
                # then match end-of-string
                loc = self.preParse( instring, loc )
                se = Empty() + StringEnd()
                se._parse( instring, loc )
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc
        else:
            return tokens

+

    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
        """
        Scan the input string for expression matches.  Each match will return the
        matching tokens, start location, and end location.  May be called with optional
        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
        C{overlap} is specified, then overlapping matches will be reported.

        Note that the start and end locations are reported relative to the string
        being parsed.  See L{I{parseString}<parseString>} for more information on parsing
        strings with embedded tabs.

        Example::
            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
            print(source)
            for tokens,start,end in Word(alphas).scanString(source):
                print(' '*start + '^'*(end-start))
                print(' '*start + tokens[0])
        
        prints::
        
            sldjf123lsdjjkf345sldkjf879lkjsfd987
            ^^^^^
            sldjf
                    ^^^^^^^
                    lsdjjkf
                              ^^^^^^
                              sldkjf
                                       ^^^^^^
                                       lkjsfd
        """
        if not self.streamlined:
            self.streamline()
        for e in self.ignoreExprs:
            e.streamline()

        if not self.keepTabs:
            instring = _ustr(instring).expandtabs()
        instrlen = len(instring)
        loc = 0
        # hoist bound-method lookups out of the scan loop
        preparseFn = self.preParse
        parseFn = self._parse
        ParserElement.resetCache()
        matches = 0
        try:
            while loc <= instrlen and matches < maxMatches:
                try:
                    preloc = preparseFn( instring, loc )
                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
                except ParseException:
                    # no match here; step one character past the preparse point.
                    # NOTE(review): if preparseFn itself raised, `preloc` is
                    # unbound here (NameError) - longstanding upstream behavior.
                    loc = preloc+1
                else:
                    if nextLoc > loc:
                        matches += 1
                        yield tokens, preloc, nextLoc
                        if overlap:
                            # NOTE(review): compares `nextloc` (preparse of the
                            # current loc) against loc, but advances to `nextLoc`
                            # (the match end) - confirm against upstream pyparsing
                            # before changing.
                            nextloc = preparseFn( instring, loc )
                            if nextloc > loc:
                                loc = nextLoc
                            else:
                                loc += 1
                        else:
                            loc = nextLoc
                    else:
                        # zero-width match: force progress to avoid an infinite loop
                        loc = preloc+1
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc

+

+    def transformString( self, instring ):

+        """

+        Extension to C{L{scanString}}, to modify matching text with modified tokens that may

+        be returned from a parse action.  To use C{transformString}, define a grammar and

+        attach a parse action to it that modifies the returned token list.

+        Invoking C{transformString()} on a target string will then scan for matches,

+        and replace the matched text patterns according to the logic in the parse

+        action.  C{transformString()} returns the resulting transformed string.

+        

+        Example::

+            wd = Word(alphas)

+            wd.setParseAction(lambda toks: toks[0].title())

+            

+            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))

+        Prints::

+            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.

+        """

+        out = []

+        lastE = 0

+        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to

+        # keep string locs straight between transformString and scanString

+        self.keepTabs = True

+        try:

+            for t,s,e in self.scanString( instring ):

+                out.append( instring[lastE:s] )

+                if t:

+                    if isinstance(t,ParseResults):

+                        out += t.asList()

+                    elif isinstance(t,list):

+                        out += t

+                    else:

+                        out.append(t)

+                lastE = e

+            out.append(instring[lastE:])

+            out = [o for o in out if o]

+            return "".join(map(_ustr,_flatten(out)))

+        except ParseBaseException as exc:

+            if ParserElement.verbose_stacktrace:

+                raise

+            else:

+                # catch and re-raise exception from here, clears out pyparsing internal stack trace

+                raise exc

+

+    def searchString( self, instring, maxMatches=_MAX_INT ):

+        """

+        Another extension to C{L{scanString}}, simplifying the access to the tokens found

+        to match the given parse expression.  May be called with optional

+        C{maxMatches} argument, to clip searching after 'n' matches are found.

+        

+        Example::

+            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters

+            cap_word = Word(alphas.upper(), alphas.lower())

+            

+            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))

+        prints::

+            ['More', 'Iron', 'Lead', 'Gold', 'I']

+        """

+        try:

+            return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])

+        except ParseBaseException as exc:

+            if ParserElement.verbose_stacktrace:

+                raise

+            else:

+                # catch and re-raise exception from here, clears out pyparsing internal stack trace

+                raise exc

+

+    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):

+        """

+        Generator method to split a string using the given expression as a separator.

+        May be called with optional C{maxsplit} argument, to limit the number of splits;

+        and the optional C{includeSeparators} argument (default=C{False}), if the separating

+        matching text should be included in the split results.

+        

+        Example::        

+            punc = oneOf(list(".,;:/-!?"))

+            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))

+        prints::

+            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']

+        """

+        splits = 0

+        last = 0

+        for t,s,e in self.scanString(instring, maxMatches=maxsplit):

+            yield instring[last:s]

+            if includeSeparators:

+                yield t[0]

+            last = e

+        yield instring[last:]

+

+    def __add__(self, other ):

+        """

+        Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement

+        converts them to L{Literal}s by default.

+        

+        Example::

+            greet = Word(alphas) + "," + Word(alphas) + "!"

+            hello = "Hello, World!"

+            print (hello, "->", greet.parseString(hello))

+        Prints::

+            Hello, World! -> ['Hello', ',', 'World', '!']

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return And( [ self, other ] )

+

+    def __radd__(self, other ):

+        """

+        Implementation of + operator when left operand is not a C{L{ParserElement}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return other + self

+

+    def __sub__(self, other):

+        """

+        Implementation of - operator, returns C{L{And}} with error stop

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return And( [ self, And._ErrorStop(), other ] )

+

+    def __rsub__(self, other ):

+        """

+        Implementation of - operator when left operand is not a C{L{ParserElement}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return other - self

+

+    def __mul__(self,other):

+        """

+        Implementation of * operator, allows use of C{expr * 3} in place of

+        C{expr + expr + expr}.  Expressions may also me multiplied by a 2-integer

+        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples

+        may also include C{None} as in:

+         - C{expr*(n,None)} or C{expr*(n,)} is equivalent

+              to C{expr*n + L{ZeroOrMore}(expr)}

+              (read as "at least n instances of C{expr}")

+         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}

+              (read as "0 to n instances of C{expr}")

+         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}

+         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}

+

+        Note that C{expr*(None,n)} does not raise an exception if

+        more than n exprs exist in the input stream; that is,

+        C{expr*(None,n)} does not enforce a maximum number of expr

+        occurrences.  If this behavior is desired, then write

+        C{expr*(None,n) + ~expr}

+        """

+        if isinstance(other,int):

+            minElements, optElements = other,0

+        elif isinstance(other,tuple):

+            other = (other + (None, None))[:2]

+            if other[0] is None:

+                other = (0, other[1])

+            if isinstance(other[0],int) and other[1] is None:

+                if other[0] == 0:

+                    return ZeroOrMore(self)

+                if other[0] == 1:

+                    return OneOrMore(self)

+                else:

+                    return self*other[0] + ZeroOrMore(self)

+            elif isinstance(other[0],int) and isinstance(other[1],int):

+                minElements, optElements = other

+                optElements -= minElements

+            else:

+                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))

+        else:

+            raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))

+

+        if minElements < 0:

+            raise ValueError("cannot multiply ParserElement by negative value")

+        if optElements < 0:

+            raise ValueError("second tuple value must be greater or equal to first tuple value")

+        if minElements == optElements == 0:

+            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")

+

+        if (optElements):

+            def makeOptionalList(n):

+                if n>1:

+                    return Optional(self + makeOptionalList(n-1))

+                else:

+                    return Optional(self)

+            if minElements:

+                if minElements == 1:

+                    ret = self + makeOptionalList(optElements)

+                else:

+                    ret = And([self]*minElements) + makeOptionalList(optElements)

+            else:

+                ret = makeOptionalList(optElements)

+        else:

+            if minElements == 1:

+                ret = self

+            else:

+                ret = And([self]*minElements)

+        return ret

+

    def __rmul__(self, other):
        # n * expr delegates to expr * n (multiplication is symmetric here)
        return self.__mul__(other)

+

+    def __or__(self, other ):

+        """

+        Implementation of | operator - returns C{L{MatchFirst}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return MatchFirst( [ self, other ] )

+

+    def __ror__(self, other ):

+        """

+        Implementation of | operator when left operand is not a C{L{ParserElement}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return other | self

+

+    def __xor__(self, other ):

+        """

+        Implementation of ^ operator - returns C{L{Or}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return Or( [ self, other ] )

+

+    def __rxor__(self, other ):

+        """

+        Implementation of ^ operator when left operand is not a C{L{ParserElement}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return other ^ self

+

+    def __and__(self, other ):

+        """

+        Implementation of & operator - returns C{L{Each}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return Each( [ self, other ] )

+

+    def __rand__(self, other ):

+        """

+        Implementation of & operator when left operand is not a C{L{ParserElement}}

+        """

+        if isinstance( other, basestring ):

+            other = ParserElement._literalStringClass( other )

+        if not isinstance( other, ParserElement ):

+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),

+                    SyntaxWarning, stacklevel=2)

+            return None

+        return other & self

+

+    def __invert__( self ):

+        """

+        Implementation of ~ operator - returns C{L{NotAny}}

+        """

+        return NotAny( self )

+

+    def __call__(self, name=None):

+        """

+        Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.

+        

+        If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be

+        passed as C{True}.

+           

+        If C{name} is omitted, same as calling C{L{copy}}.

+

+        Example::

+            # these are equivalent

+            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")

+            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")             

+        """

+        if name is not None:

+            return self.setResultsName(name)

+        else:

+            return self.copy()

+

+    def suppress( self ):

+        """

+        Suppresses the output of this C{ParserElement}; useful to keep punctuation from

+        cluttering up returned output.

+        """

+        return Suppress( self )

+

+    def leaveWhitespace( self ):

+        """

+        Disables the skipping of whitespace before matching the characters in the

+        C{ParserElement}'s defined pattern.  This is normally only used internally by

+        the pyparsing module, but may be needed in some whitespace-sensitive grammars.

+        """

+        self.skipWhitespace = False

+        return self

+

+    def setWhitespaceChars( self, chars ):

+        """

+        Overrides the default whitespace chars

+        """

+        self.skipWhitespace = True

+        self.whiteChars = chars

+        self.copyDefaultWhiteChars = False

+        return self

+

+    def parseWithTabs( self ):

+        """

+        Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.

+        Must be called before C{parseString} when the input grammar contains elements that

+        match C{<TAB>} characters.

+        """

+        self.keepTabs = True

+        return self

+

+    def ignore( self, other ):

+        """

+        Define expression to be ignored (e.g., comments) while doing pattern

+        matching; may be called repeatedly, to define multiple comment or other

+        ignorable patterns.

+        

+        Example::

+            patt = OneOrMore(Word(alphas))

+            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']

+            

+            patt.ignore(cStyleComment)

+            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']

+        """

+        if isinstance(other, basestring):

+            other = Suppress(other)

+

+        if isinstance( other, Suppress ):

+            if other not in self.ignoreExprs:

+                self.ignoreExprs.append(other)

+        else:

+            self.ignoreExprs.append( Suppress( other.copy() ) )

+        return self

+

+    def setDebugActions( self, startAction, successAction, exceptionAction ):

+        """

+        Enable display of debugging messages while doing pattern matching.

+        """

+        self.debugActions = (startAction or _defaultStartDebugAction,

+                             successAction or _defaultSuccessDebugAction,

+                             exceptionAction or _defaultExceptionDebugAction)

+        self.debug = True

+        return self

+

+    def setDebug( self, flag=True ):

+        """

+        Enable display of debugging messages while doing pattern matching.

+        Set C{flag} to True to enable, False to disable.

+

+        Example::

+            wd = Word(alphas).setName("alphaword")

+            integer = Word(nums).setName("numword")

+            term = wd | integer

+            

+            # turn on debugging for wd

+            wd.setDebug()

+

+            OneOrMore(term).parseString("abc 123 xyz 890")

+        

+        prints::

+            Match alphaword at loc 0(1,1)

+            Matched alphaword -> ['abc']

+            Match alphaword at loc 3(1,4)

+            Exception raised:Expected alphaword (at char 4), (line:1, col:5)

+            Match alphaword at loc 7(1,8)

+            Matched alphaword -> ['xyz']

+            Match alphaword at loc 11(1,12)

+            Exception raised:Expected alphaword (at char 12), (line:1, col:13)

+            Match alphaword at loc 15(1,16)

+            Exception raised:Expected alphaword (at char 15), (line:1, col:16)

+

+        The output shown is that produced by the default debug actions - custom debug actions can be

+        specified using L{setDebugActions}. Prior to attempting

+        to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}

+        is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}

+        message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,

+        which makes debugging and exception messages easier to understand - for instance, the default

+        name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.

+        """

+        if flag:

+            self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )

+        else:

+            self.debug = False

+        return self

+

+    def __str__( self ):

+        return self.name

+

+    def __repr__( self ):

+        return _ustr(self)

+

+    def streamline( self ):

+        self.streamlined = True

+        self.strRepr = None

+        return self

+

    def checkRecursion( self, parseElementList ):
        # Base implementation: a leaf element cannot be self-recursive, so
        # there is nothing to check; container subclasses override this.
        pass

+

+    def validate( self, validateTrace=[] ):

+        """

+        Check defined expressions for valid structure, check for infinite recursive definitions.

+        """

+        self.checkRecursion( [] )

+

+    def parseFile( self, file_or_filename, parseAll=False ):

+        """

+        Execute the parse expression on the given file or filename.

+        If a filename is specified (instead of a file object),

+        the entire file is opened, read, and closed before parsing.

+        """

+        try:

+            file_contents = file_or_filename.read()

+        except AttributeError:

+            with open(file_or_filename, "r") as f:

+                file_contents = f.read()

+        try:

+            return self.parseString(file_contents, parseAll)

+        except ParseBaseException as exc:

+            if ParserElement.verbose_stacktrace:

+                raise

+            else:

+                # catch and re-raise exception from here, clears out pyparsing internal stack trace

+                raise exc

+

+    def __eq__(self,other):

+        if isinstance(other, ParserElement):

+            return self is other or vars(self) == vars(other)

+        elif isinstance(other, basestring):

+            return self.matches(other)

+        else:

+            return super(ParserElement,self)==other

+

+    def __ne__(self,other):

+        return not (self == other)

+

+    def __hash__(self):

+        return hash(id(self))

+

+    def __req__(self,other):

+        return self == other

+

+    def __rne__(self,other):

+        return not (self == other)

+

+    def matches(self, testString, parseAll=True):

+        """

+        Method for quick testing of a parser against a test string. Good for simple 

+        inline microtests of sub expressions while building up larger parser.

+           

+        Parameters:

+         - testString - to test against this expression for a match

+         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests

+            

+        Example::

+            expr = Word(nums)

+            assert expr.matches("100")

+        """

+        try:

+            self.parseString(_ustr(testString), parseAll=parseAll)

+            return True

+        except ParseBaseException:

+            return False

+                

    def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
        """
        Execute the parse expression on a series of test strings, showing each
        test, the parsed results or where the parse failed. Quick and easy way to
        run a parse expression against a list of sample strings.

        Parameters:
         - tests - a list of separate test strings, or a multiline string of test strings
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test
              string; pass None to disable comment filtering
         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
              if False, only dump nested list
         - printResults - (default=C{True}) prints test output to stdout
         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing

        Returns: a (success, results) tuple, where success indicates that all tests succeeded
        (or failed if C{failureTests} is True), and the results contain a list of lines of each
        test's output.

        Each test string must be on a single line; to test a multi-line
        input, embed C{r"\\n"} in a single-line test (a raw literal), which
        is expanded to a real newline before parsing.
        """
        # Normalize inputs: a multiline string becomes a list of stripped
        # lines; a comment string becomes a Literal matcher.
        if isinstance(tests, basestring):
            tests = list(map(str.strip, tests.rstrip().splitlines()))
        if isinstance(comment, basestring):
            comment = Literal(comment)
        allResults = []
        comments = []   # comment lines accumulated for the NEXT test
        success = True
        for t in tests:
            # Comment lines (and blank lines following comments) are held
            # back and emitted as a header above the next real test.
            if comment is not None and comment.matches(t, False) or comments and not t:
                comments.append(t)
                continue
            if not t:
                continue
            out = ['\n'.join(comments), t]
            comments = []
            try:
                # Expand literal r"\n" escapes so multi-line inputs can be
                # written as single-line test strings.
                t = t.replace(r'\n','\n')
                result = self.parseString(t, parseAll=parseAll)
                out.append(result.dump(full=fullDump))
                # A parse success counts as overall success only when we
                # were NOT expecting failures.
                success = success and not failureTests
            except ParseBaseException as pe:
                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
                if '\n' in t:
                    # Multi-line input: show the offending line, then a
                    # caret under the failing column.
                    out.append(line(pe.loc, t))
                    out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
                else:
                    out.append(' '*pe.loc + '^' + fatal)
                out.append("FAIL: " + str(pe))
                # A parse failure counts as success only in failureTests mode.
                success = success and failureTests
                result = pe
            except Exception as exc:
                # Non-parse exception (e.g. raised by a parse action).
                out.append("FAIL-EXCEPTION: " + str(exc))
                success = success and failureTests
                result = exc

            if printResults:
                if fullDump:
                    out.append('')
                print('\n'.join(out))

            # Record the (possibly escape-expanded) test string and either
            # its ParseResults or the exception it raised.
            allResults.append((t, result))

        return success, allResults

+

+        

class Token(ParserElement):
    """
    Abstract base for C{ParserElement}s that match at the character level
    (atomic patterns) rather than by composing other expressions.
    """
    def __init__(self):
        super(Token, self).__init__(savelist=False)

+

+

class Empty(Token):
    """
    A token matching the empty string - always succeeds without consuming
    any input.
    """
    def __init__(self):
        super(Empty, self).__init__()
        self.name = "Empty"
        self.mayIndexError = False
        self.mayReturnEmpty = True

+

+

class NoMatch(Token):
    """
    A token that never matches; parseImpl unconditionally raises.
    """
    def __init__(self):
        super(NoMatch, self).__init__()
        self.name = "NoMatch"
        self.errmsg = "Unmatchable token"
        self.mayReturnEmpty = True
        self.mayIndexError = False

    def parseImpl(self, instring, loc, doActions=True):
        raise ParseException(instring, loc, self.errmsg, self)

+

+

class Literal(Token):
    """
    Token to exactly match a specified string.

    Example::
        Literal('blah').parseString('blah')  # -> ['blah']
        Literal('blah').parseString('blahfooblah')  # -> ['blah']
        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"

    For case-insensitive matching, use L{CaselessLiteral}.

    For keyword matching (force word break before and after the matched string),
    use L{Keyword} or L{CaselessKeyword}.
    """
    def __init__( self, matchString ):
        super(Literal,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            # Cached for the single-character fast path in parseImpl.
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Literal; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
            # Degrade an empty literal into an Empty token by swapping the
            # instance's class in place; parse behavior then comes from Empty.
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False

    # Performance tuning: this routine gets called a *lot*
    # if this is a single character match string  and the first character matches,
    # short-circuit as quickly as possible, and avoid calling startswith
    #~ @profile
    def parseImpl( self, instring, loc, doActions=True ):
        if (instring[loc] == self.firstMatchChar and
            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
            return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)
# Short alias kept for backward compatibility with old pyparsing clients.
_L = Literal
# Literal is the class used to auto-convert bare strings in operator
# expressions (e.g. expr + "," ).
ParserElement._literalStringClass = Literal

+

class Keyword(Token):
    """
    Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character.  Compare with C{L{Literal}}:
     - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
     - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
    Accepts two optional constructor arguments in addition to the keyword string:
     - C{identChars} is a string of characters that would be valid identifier characters,
          defaulting to all alphanumerics + "_" and "$"
     - C{caseless} allows case-insensitive matching, default is C{False}.

    Example::
        Keyword("start").parseString("start")  # -> ['start']
        Keyword("start").parseString("starting")  # -> Exception

    For case-insensitive matching, use L{CaselessKeyword}.
    """
    # Class-level default identifier-character set, shared by all Keywords
    # constructed without an explicit identChars argument.
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"

    def __init__( self, matchString, identChars=None, caseless=False ):
        super(Keyword,self).__init__()
        if identChars is None:
            identChars = Keyword.DEFAULT_KEYWORD_CHARS
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            # NOTE(review): unlike Literal, an empty Keyword only warns and
            # does NOT degrade to Empty - firstMatchChar is then left unset,
            # so a later caseful parseImpl would raise AttributeError.
            warnings.warn("null string passed to Keyword; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # Comparisons are done in upper case on both sides.
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = set(identChars)

    def parseImpl( self, instring, loc, doActions=True ):
        # Both branches check: (1) the keyword text matches at loc,
        # (2) the character AFTER the match (if any) is not an identifier
        # character, and (3) the character BEFORE loc (if any) is not an
        # identifier character - i.e. the match sits on word boundaries.
        if self.caseless:
            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
                return loc+self.matchLen, self.match
        else:
            if (instring[loc] == self.firstMatchChar and
                (self.matchLen==1 or instring.startswith(self.match,loc)) and
                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
                (loc == 0 or instring[loc-1] not in self.identChars) ):
                return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)

    def copy(self):
        # Copies deliberately revert to the class-wide default identifier
        # set rather than inheriting this instance's identChars.
        c = super(Keyword,self).copy()
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c

    @staticmethod
    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars

+

class CaselessLiteral(Literal):
    """
    Case-insensitive variant of L{Literal}: matches regardless of case,
    but the parsed result is always the original defining string, NOT the
    case of the input text.

    Example::
        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']

    (Contrast with example for L{CaselessKeyword}.)
    """
    def __init__(self, matchString):
        # Store the upper-cased form for matching...
        super(CaselessLiteral, self).__init__(matchString.upper())
        # ...but preserve the defining literal for the returned results.
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name

    def parseImpl(self, instring, loc, doActions=True):
        candidate = instring[loc:loc + self.matchLen]
        if candidate.upper() == self.match:
            return loc + self.matchLen, self.returnString
        raise ParseException(instring, loc, self.errmsg, self)

+

class CaselessKeyword(Keyword):
    """
    Caseless version of L{Keyword}.

    Example::
        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']

    (Contrast with example for L{CaselessLiteral}.)
    """
    def __init__(self, matchString, identChars=None):
        super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True)

    def parseImpl(self, instring, loc, doActions=True):
        # Case-insensitive keyword match with word-boundary checks on BOTH
        # sides, mirroring the caseless branch of Keyword.parseImpl.
        if ((instring[loc:loc + self.matchLen].upper() == self.caselessmatch) and
                (loc >= len(instring) - self.matchLen or
                 instring[loc + self.matchLen].upper() not in self.identChars) and
                # BUG FIX: the previous override omitted this leading-boundary
                # test, so e.g. scanning "xCMD" could match the keyword "CMD"
                # at offset 1; Keyword.parseImpl performs this check and this
                # override must match it.
                (loc == 0 or instring[loc - 1].upper() not in self.identChars)):
            return loc + self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)

+

class CloseMatch(Token):
    """
    A variation on L{Literal} which matches "close" matches, that is,
    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
     - C{match_string} - string to be matched
     - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match

    The results from a successful parse will contain the matched text from the input string and the following named results:
     - C{mismatches} - a list of the positions within the match_string where mismatches were found
     - C{original} - the original match_string used to compare against the input string

    If C{mismatches} is an empty list, then the match was an exact match.

    Example::
        patt = CloseMatch("ATCATCGAATGGA")
        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)

        # exact match
        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})

        # close match allowing up to 2 mismatches
        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
    """
    def __init__(self, match_string, maxMismatches=1):
        super(CloseMatch, self).__init__()
        self.name = match_string
        self.match_string = match_string
        self.maxMismatches = maxMismatches
        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
        self.mayIndexError = False
        self.mayReturnEmpty = False

    def parseImpl(self, instring, loc, doActions=True):
        start = loc
        instrlen = len(instring)
        maxloc = start + len(self.match_string)

        # Only attempt the comparison if enough input remains.
        if maxloc <= instrlen:
            match_string = self.match_string
            match_stringloc = 0
            mismatches = []
            maxMismatches = self.maxMismatches

            # Compare character by character, collecting mismatch offsets;
            # bail out (break) as soon as the budget is exceeded. The
            # for/else runs the success path only when no break occurred.
            for match_stringloc, s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
                src, mat = s_m
                if src != mat:
                    mismatches.append(match_stringloc)
                    if len(mismatches) > maxMismatches:
                        break
            else:
                # BUG FIX: the previous code set ``loc = match_stringloc + 1``,
                # discarding the start offset - for any match not at position
                # 0 this corrupted the parse position and returned an empty
                # slice. The new location must be relative to ``start``.
                loc = start + match_stringloc + 1
                results = ParseResults([instring[start:loc]])
                results['original'] = self.match_string
                results['mismatches'] = mismatches
                return loc, results

        raise ParseException(instring, loc, self.errmsg, self)

+

+

class Word(Token):
    """
    Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction. An optional
    C{excludeChars} parameter can list characters that might be found in
    the input C{bodyChars} string; useful to define a word of all printables
    except for one or two characters, for instance.

    L{srange} is useful for defining custom character set strings for defining
    C{Word} expressions, using range notation from regular expression character sets.

    A common mistake is to use C{Word} to match a specific literal string, as in
    C{Word("Address")}. Remember that C{Word} uses the string argument to define
    I{sets} of matchable characters - to match an exact literal string, use
    L{Literal} or L{Keyword}. Helper strings for building Words include
    L{alphas}, L{nums}, L{alphanums}, L{hexnums}, L{alphas8bit}, L{punc8bit},
    and L{printables}.

    Example::
        # a word composed of digits
        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))

        # a word with a leading capital, and zero or more lowercase
        capital_word = Word(alphas.upper(), alphas.lower())

        # hostnames are alphanumeric, with leading alpha, and '-'
        hostname = Word(alphas, alphanums+'-')

        # any string of non-whitespace characters, except for ','
        csv_value = Word(printables, excludeChars=",")
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
        super(Word,self).__init__()
        # Remove excluded characters from both character sets up front.
        if excludeChars:
            initChars = ''.join(c for c in initChars if c not in excludeChars)
            if bodyChars:
                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
        # Keep both the original strings (for building the regex and the
        # string repr) and set versions (for fast membership tests).
        self.initCharsOrig = initChars
        self.initChars = set(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = set(bodyChars)
        else:
            # No separate body set: body characters default to initChars.
            self.bodyCharsOrig = initChars
            self.bodyChars = set(initChars)

        self.maxSpecified = max > 0

        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        # exact overrides both min and max.
        if exact > 0:
            self.maxLen = exact
            self.minLen = exact

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.asKeyword = asKeyword

        # Fast path: when no length limits apply and neither set contains
        # a space, precompile an equivalent regular expression; parseImpl
        # uses it when available (self.re), else falls back to the
        # character-by-character scan below.
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.initCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                      (re.escape(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                      (_escapeRegexRangeChars(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile( self.reString )
            except Exception:
                # Regex construction failed - silently fall back to the
                # slow path (self.re stays falsy).
                self.re = None

    def parseImpl( self, instring, loc, doActions=True ):
        # Fast path: delegate the whole match to the precompiled regex.
        if self.re:
            result = self.re.match(instring,loc)
            if not result:
                raise ParseException(instring, loc, self.errmsg, self)

            loc = result.end()
            return loc, result.group()

        # Slow path: explicit scan. First character must be in initChars.
        if not(instring[ loc ] in self.initChars):
            raise ParseException(instring, loc, self.errmsg, self)

        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        # Never scan past maxLen characters or the end of the input.
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1

        # Post-scan validation of the candidate word [start:loc].
        throwException = False
        if loc - start < self.minLen:
            throwException = True
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            # Hit the explicit max but more body chars follow - reject.
            throwException = True
        if self.asKeyword:
            # Keyword mode: the word must not abut body chars on either side.
            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
                throwException = True

        if throwException:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]

    def __str__( self ):
        # Prefer the base class's name-based repr; fall through to build a
        # compact "W:(...)" form if that raises (e.g. name not yet set).
        try:
            return super(Word,self).__str__()
        except Exception:
            pass


        if self.strRepr is None:

            # Abbreviate long character-set strings for readability.
            def charsAsStr(s):
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s

            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)

        return self.strRepr

+

+

class Regex(Token):
    r"""
    Token for matching strings that match a given regular expression.
    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
    named parse results.

    Example::
        realnum = Regex(r"[+-]?\d+\.\d*")
        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
    """
    # Type of a compiled re pattern, used to accept pre-compiled patterns.
    compiledREtype = type(re.compile("[A-Z]"))
    def __init__( self, pattern, flags=0):
        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
        super(Regex,self).__init__()

        if isinstance(pattern, basestring):
            if not pattern:
                # Warn (but still proceed) on an empty pattern.
                warnings.warn("null string passed to Regex; use Empty() instead",
                        SyntaxWarning, stacklevel=2)

            self.pattern = pattern
            self.flags = flags

            try:
                self.re = re.compile(self.pattern, self.flags)
                self.reString = self.pattern
            except sre_constants.error:
                # Surface a friendly warning, then re-raise the original
                # compile error to the caller.
                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                    SyntaxWarning, stacklevel=2)
                raise

        elif isinstance(pattern, Regex.compiledREtype):
            # Pre-compiled pattern: use it directly; flags cannot be
            # applied retroactively and are stored as given.
            self.re = pattern
            # Chained assignment: both attributes get str(pattern).
            self.pattern = \
            self.reString = str(pattern)
            self.flags = flags

        else:
            raise ValueError("Regex may only be constructed with a string or a compiled RE object")

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        result = self.re.match(instring,loc)
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        # Named groups become named results on the returned ParseResults.
        d = result.groupdict()
        ret = ParseResults(result.group())
        if d:
            for k in d:
                ret[k] = d[k]
        return loc,ret

    def __str__( self ):
        # Prefer the base class's name-based repr; fall through to a
        # "Re:(...)" form if that raises.
        try:
            return super(Regex,self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)

        return self.strRepr

+

+

class QuotedString(Token):
    r"""
    Token for matching strings that are delimited by quoting characters.
    
    Defined with the following parameters:
        - quoteChar - string of one or more characters defining the quote delimiting string
        - escChar - character to escape quotes, typically backslash (default=C{None})
        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})

    Example::
        qs = QuotedString('"')
        print(qs.searchString('lsjdf "This is the quote" sldjf'))
        complex_qs = QuotedString('{{', endQuoteChar='}}')
        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
        sql_qs = QuotedString('"', escQuote='""')
        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
    prints::
        [['This is the quote']]
        [['This is the "quote"']]
        [['This is the quote with "embedded" quotes']]
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
        super(QuotedString,self).__init__()

        # remove white space from quote chars - won't work anyway
        quoteChar = quoteChar.strip()
        if not quoteChar:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()

        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if not endQuoteChar:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()

        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        self.convertWhitespaceEscapes = convertWhitespaceEscapes

        # Build a regex of the form: open quote, then a repeated group of
        # "anything that is not the end quote (or an escape)", then the
        # closing quote.  The repeated group is opened here and closed after
        # the optional escape alternatives are appended below.
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            # single-line mode additionally stops at newlines
            self.flags = 0
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        if len(self.endQuoteChar) > 1:
            # multi-character end quote: also allow any proper prefix of the
            # end quote that is NOT followed by the rest of it
            self.pattern += (
                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
                )
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))

        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning, stacklevel=2)
            raise

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        # cheap single-character check before running the full regex
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        ret = result.group()

        if self.unquoteResults:

            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]

            if isinstance(ret,basestring):
                # replace escaped whitespace
                if '\\' in ret and self.convertWhitespaceEscapes:
                    ws_map = {
                        r'\t' : '\t',
                        r'\n' : '\n',
                        r'\f' : '\f',
                        r'\r' : '\r',
                    }
                    for wslit,wschar in ws_map.items():
                        ret = ret.replace(wslit, wschar)

                # replace escaped characters
                # (fix: the replacement must be a raw string -- "\g" is an
                # invalid escape sequence in a plain string literal and raises
                # a DeprecationWarning on Python 3.6+)
                if self.escChar:
                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)

                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)

        return loc, ret

    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)

        return self.strRepr

+

+

class CharsNotIn(Token):
    """
    Token for matching words composed of characters I{not} in a given set (will
    include whitespace in matched characters if not listed in the provided exclusion set - see example).
    Defined with string containing all disallowed characters, and an optional
    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.

    Example::
        # define a comma-separated-value as anything that is not a ','
        csv_value = CharsNotIn(',')
        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
    prints::
        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
    """
    def __init__( self, notChars, min=1, max=0, exact=0 ):
        super(CharsNotIn,self).__init__()
        # whitespace is just another matchable character for this token
        self.skipWhitespace = False
        self.notChars = notChars

        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")

        # 0 for max means "no upper bound"; exact overrides both bounds
        self.minLen = min
        self.maxLen = max if max > 0 else _MAX_INT
        if exact > 0:
            self.minLen = self.maxLen = exact

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = ( self.minLen == 0 )
        self.mayIndexError = False

    def parseImpl( self, instring, loc, doActions=True ):
        # first character must already be acceptable
        if instring[loc] in self.notChars:
            raise ParseException(instring, loc, self.errmsg, self)

        start = loc
        loc += 1
        forbidden = self.notChars
        # furthest position we may scan to, bounded by maxLen and input end
        scan_limit = min( start + self.maxLen, len(instring) )
        while loc < scan_limit and instring[loc] not in forbidden:
            loc += 1

        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]

    def __str__( self ):
        try:
            return super(CharsNotIn, self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            # show at most four of the excluded characters
            excluded = self.notChars
            if len(excluded) > 4:
                self.strRepr = "!W:(%s...)" % excluded[:4]
            else:
                self.strRepr = "!W:(%s)" % excluded

        return self.strRepr

+

class White(Token):
    """
    Special matching class for matching whitespace.  Normally, whitespace is ignored
    by pyparsing grammars.  This class is included when some whitespace structures
    are significant.  Define with a string containing the whitespace characters to be
    matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
    as defined for the C{L{Word}} class.
    """
    # Human-readable names for each recognized whitespace character, used to
    # build a descriptive self.name for error messages.
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White,self).__init__()
        self.matchWhite = ws
        # the characters this token matches must not also be skipped as
        # inter-token whitespace, so remove them from the inherited skip set
        self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
        #~ self.leaveWhitespace()
        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name

        # min/max/exact length handling mirrors the Word token:
        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            # max == 0 means "no upper bound"
            self.maxLen = _MAX_INT

        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact

    def parseImpl( self, instring, loc, doActions=True ):
        # current character must be one of the whitespace chars we match
        if not(instring[ loc ] in self.matchWhite):
            raise ParseException(instring, loc, self.errmsg, self)
        start = loc
        loc += 1
        # consume up to maxLen matching characters, bounded by end of input
        maxloc = start + self.maxLen
        maxloc = min( maxloc, len(instring) )
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1

        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]

+

+

class _PositionToken(Token):
    """Abstract base for zero-width tokens that assert something about the
    current parse position rather than consuming input text."""
    def __init__( self ):
        super(_PositionToken, self).__init__()
        # position assertions match empty and never index past the input
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.name = self.__class__.__name__

+

class GoToColumn(_PositionToken):
    """
    Token to advance to a specific column of input text; useful for tabular report scraping.
    """
    def __init__( self, colno ):
        super(GoToColumn, self).__init__()
        self.col = colno

    def preParse( self, instring, loc ):
        # skip ignorables and whitespace until the target column (or until
        # a non-space character) is reached
        if col(loc, instring) != self.col:
            end = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables( instring, loc )
            while ( loc < end
                    and instring[loc].isspace()
                    and col(loc, instring) != self.col ):
                loc += 1
        return loc

    def parseImpl( self, instring, loc, doActions=True ):
        curcol = col( loc, instring )
        if curcol > self.col:
            # already past the target column -- cannot back up
            raise ParseException( instring, loc, "Text not in expected column", self )
        newloc = loc + self.col - curcol
        return newloc, instring[loc:newloc]

+

+

class LineStart(_PositionToken):
    """
    Matches if current position is at the beginning of a line within the parse string
    
    Example::
    
        test = '''\
        AAA this line
        AAA and this line
          AAA but not this one
        B AAA and definitely not this one
        '''

        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
            print(t)
    
    Prints::
        ['AAA', ' this line']
        ['AAA', ' and this line']    

    """
    def __init__( self ):
        super(LineStart, self).__init__()
        self.errmsg = "Expected start of line"

    def parseImpl( self, instring, loc, doActions=True ):
        # column 1 is the start of a line; anywhere else is a failure
        if col(loc, instring) != 1:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

+

class LineEnd(_PositionToken):
    """
    Matches if current position is at the end of a line within the parse string
    """
    def __init__( self ):
        super(LineEnd,self).__init__()
        # do not skip newlines as ordinary whitespace, or we could never
        # land on the "\n" we are trying to match
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected end of line"

    def parseImpl( self, instring, loc, doActions=True ):
        if loc<len(instring):
            if instring[loc] == "\n":
                # consume the newline and return it as the matched token
                return loc+1, "\n"
            else:
                raise ParseException(instring, loc, self.errmsg, self)
        elif loc == len(instring):
            # end of input also counts as end of line.  NOTE(review): this
            # returns loc+1, one position past the end of the string -- the
            # same convention StringEnd uses; callers appear to tolerate it.
            return loc+1, []
        else:
            # loc beyond end of input
            raise ParseException(instring, loc, self.errmsg, self)

+

class StringStart(_PositionToken):
    """
    Matches if current position is at the beginning of the parse string
    """
    def __init__( self ):
        super(StringStart, self).__init__()
        self.errmsg = "Expected start of text"

    def parseImpl( self, instring, loc, doActions=True ):
        # a non-zero position is acceptable only when everything before it
        # is skippable whitespace/ignorables
        if loc != 0 and loc != self.preParse( instring, 0 ):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

+

class StringEnd(_PositionToken):
    """
    Matches if current position is at the end of the parse string
    """
    def __init__( self ):
        super(StringEnd,self).__init__()
        self.errmsg = "Expected end of text"

    def parseImpl( self, instring, loc, doActions=True ):
        if loc < len(instring):
            raise ParseException(instring, loc, self.errmsg, self)
        elif loc == len(instring):
            # NOTE(review): returns loc+1, one past the end of the string --
            # mirrors LineEnd's convention; callers appear to tolerate it
            return loc+1, []
        elif loc > len(instring):
            return loc, []
        else:
            # unreachable: one of the three comparisons above always holds
            raise ParseException(instring, loc, self.errmsg, self)

+

class WordStart(_PositionToken):
    """
    Matches if the current position is at the beginning of a Word, and
    is not preceded by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
    the string being parsed, or at the beginning of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordStart, self).__init__()
        self.wordChars = set(wordChars)
        self.errmsg = "Not at the start of a word"

    def parseImpl(self, instring, loc, doActions=True ):
        # position 0 is always a word start
        if loc == 0:
            return loc, []
        # fail when the previous char is a word char, or the current one
        # is not (short-circuit order preserved: instring[loc] is only
        # examined when the previous character is not a word char)
        if (instring[loc-1] in self.wordChars or
            instring[loc] not in self.wordChars):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

+

class WordEnd(_PositionToken):
    """
    Matches if the current position is at the end of a Word, and
    is not followed by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
    the string being parsed, or at the end of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordEnd, self).__init__()
        self.wordChars = set(wordChars)
        # the character right at loc matters, so do not skip whitespace
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"

    def parseImpl(self, instring, loc, doActions=True ):
        instrlen = len(instring)
        # end of input (or empty input) is always a word end
        if instrlen == 0 or loc >= instrlen:
            return loc, []
        # fail when the current char is a word char, or the previous one
        # is not
        if (instring[loc] in self.wordChars or
            instring[loc-1] not in self.wordChars):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

+

+

class ParseExpression(ParserElement):
    """
    Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
    """
    def __init__( self, exprs, savelist = False ):
        super(ParseExpression,self).__init__(savelist)
        # collections.Iterable was deprecated in Python 3.3 and removed in
        # 3.10; use collections.abc.Iterable where it exists, falling back to
        # the old location for Python 2.
        try:
            iterable_type = collections.abc.Iterable
        except AttributeError:
            iterable_type = collections.Iterable
        if isinstance( exprs, _generatorType ):
            exprs = list(exprs)

        if isinstance( exprs, basestring ):
            # a lone string becomes a one-element list of its Literal
            self.exprs = [ ParserElement._literalStringClass( exprs ) ]
        elif isinstance( exprs, iterable_type ):
            exprs = list(exprs)
            # if sequence of strings provided, wrap with Literal
            if all(isinstance(expr, basestring) for expr in exprs):
                exprs = map(ParserElement._literalStringClass, exprs)
            self.exprs = list(exprs)
        else:
            # last resort: try to iterate; wrap a bare expression in a list
            try:
                self.exprs = list( exprs )
            except TypeError:
                self.exprs = [ exprs ]
        self.callPreparse = False

    def __getitem__( self, i ):
        return self.exprs[i]

    def append( self, other ):
        """Add another expression to this combination, invalidating the
        cached string representation."""
        self.exprs.append( other )
        self.strRepr = None
        return self

    def leaveWhitespace( self ):
        """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
           all contained expressions."""
        self.skipWhitespace = False
        # copy contained expressions so callers sharing them are unaffected
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self

    def ignore( self, other ):
        # register the ignorable on self, then propagate the (possibly
        # wrapped) registered form to all contained expressions
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self

    def __str__( self ):
        try:
            return super(ParseExpression,self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr

    def streamline( self ):
        super(ParseExpression,self).streamline()

        for e in self.exprs:
            e.streamline()

        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError

            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError  |= other.mayIndexError

        self.errmsg = "Expected " + _ustr(self)

        return self

    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret

    def validate( self, validateTrace=[] ):
        # NOTE: the mutable default is never mutated (a copy is taken), so it
        # is harmless; kept for interface compatibility
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )

    def copy(self):
        """Deep-copy the contained expressions along with this one."""
        ret = super(ParseExpression,self).copy()
        ret.exprs = [e.copy() for e in self.exprs]
        return ret

+

class And(ParseExpression):
    """
    Requires all given C{ParseExpression}s to be found in the given order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'+'} operator.
    May also be constructed using the C{'-'} operator, which will suppress backtracking.

    Example::
        integer = Word(nums)
        name_expr = OneOrMore(Word(alphas))

        expr = And([integer("id"),name_expr("name"),integer("age")])
        # more easily written as:
        expr = integer("id") + name_expr("name") + integer("age")
    """

    class _ErrorStop(Empty):
        # Marker expression inserted by the '-' operator; once parseImpl sees
        # it, later failures are converted to non-backtracking syntax errors.
        def __init__(self, *args, **kwargs):
            super(And._ErrorStop,self).__init__(*args, **kwargs)
            self.name = '-'
            self.leaveWhitespace()

    def __init__( self, exprs, savelist = True ):
        super(And,self).__init__(exprs, savelist)
        # an And can match empty only if every contained expression can
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        # adopt the leading expression's whitespace-skipping behavior
        self.setWhitespaceChars( self.exprs[0].whiteChars )
        self.skipWhitespace = self.exprs[0].skipWhitespace
        self.callPreparse = True

    def parseImpl( self, instring, loc, doActions=True ):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
        errorStop = False
        for e in self.exprs[1:]:
            if isinstance(e, And._ErrorStop):
                errorStop = True
                continue
            if errorStop:
                # past an _ErrorStop: wrap ordinary parse failures in
                # ParseSyntaxException so enclosing alternatives do not
                # backtrack past this point
                try:
                    loc, exprtokens = e._parse( instring, loc, doActions )
                except ParseSyntaxException:
                    raise
                except ParseBaseException as pe:
                    pe.__traceback__ = None
                    raise ParseSyntaxException._from_exception(pe)
                except IndexError:
                    raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
            else:
                loc, exprtokens = e._parse( instring, loc, doActions )
            # only accumulate tokens that carry content or named results
            if exprtokens or exprtokens.haskeys():
                resultlist += exprtokens
        return loc, resultlist

    def __iadd__(self, other ):
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        return self.append( other ) #And( [ self, other ] )

    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
            # once an expression must consume input, left recursion through
            # the remaining expressions is impossible
            if not e.mayReturnEmpty:
                break

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"

        return self.strRepr

+

+

class Or(ParseExpression):
    """
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the expression that matches the longest string will be used.
    May be constructed using the C{'^'} operator.

    Example::
        # construct Or using '^' operator
        
        number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789"))
    prints::
        [['123'], ['3.1416'], ['789']]
    """
    def __init__( self, exprs, savelist = False ):
        super(Or,self).__init__(exprs, savelist)
        if self.exprs:
            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
        else:
            # an empty alternative set trivially matches nothing
            self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        # First pass: tryParse (no parse actions) every alternative,
        # recording where each success ends and the furthest failure.
        maxExcLoc = -1
        maxException = None
        matches = []
        for e in self.exprs:
            try:
                loc2 = e.tryParse( instring, loc )
            except ParseException as err:
                err.__traceback__ = None
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
            else:
                # save match among all matches, to retry longest to shortest
                matches.append((loc2, e))

        if matches:
            # Second pass: re-parse (with actions) longest match first; a
            # parse action may reject, in which case fall back to the next
            matches.sort(key=lambda x: -x[0])
            for _,e in matches:
                try:
                    return e._parse( instring, loc, doActions )
                except ParseException as err:
                    err.__traceback__ = None
                    if err.loc > maxExcLoc:
                        maxException = err
                        maxExcLoc = err.loc

        if maxException is not None:
            # report the furthest failure, labeled with this Or's message
            maxException.msg = self.errmsg
            raise maxException
        else:
            raise ParseException(instring, loc, "no defined alternatives to match", self)


    def __ixor__(self, other ):
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        return self.append( other ) #Or( [ self, other ] )

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"

        return self.strRepr

    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )

+

class MatchFirst(ParseExpression):
    """
    Requires that at least one C{ParseExpression} is found.
    If two expressions match, the first one listed is the one that will match.
    May be constructed using the C{'|'} operator.

    Example::
        # construct MatchFirst using '|' operator
        
        # watch the order of expressions to match
        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]

        # put more selective expression first
        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
    """
    def __init__( self, exprs, savelist = False ):
        super(MatchFirst, self).__init__(exprs, savelist)
        # an empty alternative set trivially matches nothing
        self.mayReturnEmpty = (any(e.mayReturnEmpty for e in self.exprs)
                               if self.exprs else True)

    def parseImpl( self, instring, loc, doActions=True ):
        # Try each alternative in order; the first one that parses wins.
        # Track the failure that got the furthest so the eventual error
        # message points at the most relevant spot.
        deepestErr = None
        deepestLoc = -1
        for expr in self.exprs:
            try:
                return expr._parse( instring, loc, doActions )
            except ParseException as err:
                if err.loc > deepestLoc:
                    deepestErr = err
                    deepestLoc = err.loc
            except IndexError:
                if len(instring) > deepestLoc:
                    deepestErr = ParseException(instring, len(instring), expr.errmsg, self)
                    deepestLoc = len(instring)

        # no alternative matched; raise the furthest failure, labeled with
        # this expression's message
        if deepestErr is not None:
            deepestErr.msg = self.errmsg
            raise deepestErr
        raise ParseException(instring, loc, "no defined alternatives to match", self)

    def __ior__(self, other ):
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass( other )
        return self.append( other )  # MatchFirst( [ self, other ] )

    def __str__( self ):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
        return self.strRepr

    def checkRecursion( self, parseElementList ):
        seen = parseElementList[:] + [ self ]
        for expr in self.exprs:
            expr.checkRecursion( seen )

+

+

class Each(ParseExpression):
    """
    Requires all given C{ParseExpression}s to be found, but in any order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'&'} operator.

    Example::
        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
        integer = Word(nums)
        shape_attr = "shape:" + shape_type("shape")
        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
        color_attr = "color:" + color("color")
        size_attr = "size:" + integer("size")

        # use Each (using operator '&') to accept attributes in any order 
        # (shape and posn are required, color and size are optional)
        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)

        shape_spec.runTests('''
            shape: SQUARE color: BLACK posn: 100, 120
            shape: CIRCLE size: 50 color: BLUE posn: 50,80
            color:GREEN size:20 shape:TRIANGLE posn:20,40
            '''
            )
    prints::
        shape: SQUARE color: BLACK posn: 100, 120
        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
        - color: BLACK
        - posn: ['100', ',', '120']
          - x: 100
          - y: 120
        - shape: SQUARE


        shape: CIRCLE size: 50 color: BLUE posn: 50,80
        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
        - color: BLUE
        - posn: ['50', ',', '80']
          - x: 50
          - y: 80
        - shape: CIRCLE
        - size: 50


        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
        - color: GREEN
        - posn: ['20', ',', '40']
          - x: 20
          - y: 40
        - shape: TRIANGLE
        - size: 20
    """
    def __init__( self, exprs, savelist = True ):
        super(Each,self).__init__(exprs, savelist)
        # Each can match empty only if every contained expression can
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        self.skipWhitespace = True
        # grouping of sub-expressions is computed lazily on first parse,
        # since self.exprs may still be extended after construction
        self.initExprGroups = True

    def parseImpl( self, instring, loc, doActions=True ):
        if self.initExprGroups:
            # Partition the sub-expressions once, by kind:
            #  - opt1map maps id(inner expr) -> its Optional wrapper, so a
            #    matched inner expression is recorded as its wrapper below
            self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
            self.optionals = opt1 + opt2
            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt  = self.optionals[:]
        matchOrder = []

        # Round-robin matching: keep sweeping the remaining expressions
        # until a full pass matches nothing more.
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse( instring, tmpLoc )
                except ParseException:
                    failed.append(e)
                else:
                    # record match order, mapping an inner expression back
                    # to its Optional wrapper where applicable
                    matchOrder.append(self.opt1map.get(id(e),e))
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False

        # any required expression still unmatched is an overall failure
        if tmpReqd:
            missing = ", ".join(_ustr(e) for e in tmpReqd)
            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )

        # add any unmatched Optionals, in case they have default values defined
        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]

        # re-parse in the discovered order, this time keeping the results
        # (and running parse actions, if doActions is set)
        resultlist = []
        for e in matchOrder:
            loc,results = e._parse(instring,loc,doActions)
            resultlist.append(results)

        finalResults = sum(resultlist, ParseResults([]))
        return loc, finalResults

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        # lazily build and cache the "{a & b}" rendering
        if self.strRepr is None:
            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"

        return self.strRepr

    def checkRecursion( self, parseElementList ):
        # propagate the recursion check into every contained expression
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )

+

+

class ParseElementEnhance(ParserElement):
    """
    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
    """
    def __init__( self, expr, savelist=False ):
        """
        expr - the contained expression; a plain string is promoted to the
        configured literal class (wrapped in Literal first when the
        configured class is not itself a Token subclass).
        """
        super(ParseElementEnhance,self).__init__(savelist)
        if isinstance( expr, basestring ):
            if issubclass(ParserElement._literalStringClass, Token):
                expr = ParserElement._literalStringClass(expr)
            else:
                expr = ParserElement._literalStringClass(Literal(expr))
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            # inherit parsing characteristics from the contained expression
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars( expr.whiteChars )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)

    def parseImpl( self, instring, loc, doActions=True ):
        # delegate to the contained expression; a None expr (e.g. an
        # unassigned Forward) is a parse failure, not a crash
        if self.expr is not None:
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        else:
            raise ParseException("",loc,self.errmsg,self)

    def leaveWhitespace( self ):
        self.skipWhitespace = False
        # Fix: only copy/propagate when a contained expression exists.
        # Previously self.expr.copy() ran *before* the None check, so an
        # empty enhancer (e.g. a bare Forward) raised AttributeError here.
        if self.expr is not None:
            # copy first, so the whitespace change does not leak into other
            # grammar elements sharing the same sub-expression
            self.expr = self.expr.copy()
            self.expr.leaveWhitespace()
        return self

    def ignore( self, other ):
        # mirror ParserElement.ignore, additionally forwarding the newly
        # registered ignorable into the contained expression
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseElementEnhance, self).ignore( other )
                if self.expr is not None:
                    self.expr.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseElementEnhance, self).ignore( other )
            if self.expr is not None:
                self.expr.ignore( self.ignoreExprs[-1] )
        return self

    def streamline( self ):
        super(ParseElementEnhance,self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self

    def checkRecursion( self, parseElementList ):
        # seeing ourselves again means the grammar is left-recursive
        if self in parseElementList:
            raise RecursiveGrammarException( parseElementList+[self] )
        subRecCheckList = parseElementList[:] + [ self ]
        if self.expr is not None:
            self.expr.checkRecursion( subRecCheckList )

    def validate( self, validateTrace=[] ):
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion( [] )

    def __str__( self ):
        try:
            return super(ParseElementEnhance,self).__str__()
        except Exception:
            pass

        # fall back to "ClassName:(contained expression)" rendering
        if self.strRepr is None and self.expr is not None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
        return self.strRepr

+

+

class FollowedBy(ParseElementEnhance):
    """
    Positive lookahead.  Succeeds when the contained expression matches at
    the current position, but consumes no input and always returns a null
    token list.

    Example::
        # match a label only if it is followed by a ':'
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
        # -> [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
    """
    def __init__(self, expr):
        super(FollowedBy, self).__init__(expr)
        # a pure lookahead never consumes anything
        self.mayReturnEmpty = True

    def parseImpl(self, instring, loc, doActions=True):
        # raises if the contained expression fails; on success the parse
        # position is left untouched
        self.expr.tryParse(instring, loc)
        return loc, []

+

+

class NotAny(ParseElementEnhance):
    """
    Negative lookahead.  Succeeds only when the contained expression does
    I{not} match at the current position; consumes no input, skips no
    leading whitespace, and always returns a null token list.  May be
    constructed with the unary C{'~'} operator.
    """
    def __init__(self, expr):
        super(NotAny, self).__init__(expr)
        # deliberately NOT self.leaveWhitespace(): that would propagate the
        # whitespace setting down into the contained expression
        self.skipWhitespace = False
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, " + _ustr(self.expr)

    def parseImpl(self, instring, loc, doActions=True):
        # invert the lookahead: a successful inner match is our failure
        if self.expr.canParseNext(instring, loc):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

    def __str__(self):
        try:
            return self.name
        except AttributeError:
            pass
        if self.strRepr is None:
            self.strRepr = "~{" + _ustr(self.expr) + "}"
        return self.strRepr

+

class _MultipleMatch(ParseElementEnhance):
    """
    Common implementation for L{OneOrMore}/L{ZeroOrMore}: repeatedly match
    the contained expression, optionally refusing to continue once a
    C{stopOn} sentinel would match.
    """
    def __init__( self, expr, stopOn=None):
        super(_MultipleMatch, self).__init__(expr)
        self.saveAsList = True
        ender = stopOn
        if isinstance(ender, basestring):
            ender = ParserElement._literalStringClass(ender)
        # negative lookahead evaluated before each repetition
        self.not_ender = ~ender if ender is not None else None

    def parseImpl( self, instring, loc, doActions=True ):
        # bind hot attributes to locals - this loop is performance-critical
        self_expr_parse = self.expr._parse
        self_skip_ignorables = self._skipIgnorables
        check_ender = self.not_ender is not None
        if check_ender:
            try_not_ender = self.not_ender.tryParse
        
        # must be at least one (but first see if we are the stopOn sentinel;
        # if so, fail)
        if check_ender:
            try_not_ender(instring, loc)
        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
        try:
            hasIgnoreExprs = (not not self.ignoreExprs)
            while 1:
                if check_ender:
                    try_not_ender(instring, loc)
                if hasIgnoreExprs:
                    preloc = self_skip_ignorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
                # only accumulate non-empty (or named) results
                if tmptokens or tmptokens.haskeys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            # first failed repetition ends the loop; everything matched so
            # far stands
            pass

        return loc, tokens

+        

class OneOrMore(_MultipleMatch):
    """
    Matches one or more repetitions of the given expression.

    Parameters:
     - expr - expression that must match one or more times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only needed when the sentinel would otherwise be consumed by the
          repetition expression itself)

    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')

        # use stopOn so that the next label is not swallowed as data
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        text = "shape: SQUARE posn: upper left color: BLACK"
        OneOrMore(attr_expr).parseString(text).pprint()
        # -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]

        # could also be written as
        (attr_expr * (1,)).parseString(text).pprint()
    """

    def __str__(self):
        try:
            return self.name
        except AttributeError:
            pass
        if self.strRepr is None:
            self.strRepr = "{" + _ustr(self.expr) + "}..."
        return self.strRepr

+

class ZeroOrMore(_MultipleMatch):
    """
    Matches zero or more repetitions of the given expression.

    Parameters:
     - expr - expression that may match any number of times
     - stopOn - (default=C{None}) - expression for a terminating sentinel
          (only needed when the sentinel would otherwise be consumed by the
          repetition expression itself)

    Example: similar to L{OneOrMore}
    """
    def __init__(self, expr, stopOn=None):
        super(ZeroOrMore, self).__init__(expr, stopOn=stopOn)
        # zero repetitions is a valid (empty) match
        self.mayReturnEmpty = True

    def parseImpl(self, instring, loc, doActions=True):
        try:
            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
        except (ParseException, IndexError):
            # no repetitions at all: succeed with an empty result
            return loc, []

    def __str__(self):
        try:
            return self.name
        except AttributeError:
            pass
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]..."
        return self.strRepr

+

class _NullToken(object):
    """A sentinel token: always falsy, renders as an empty string."""

    def __str__(self):
        # contributes nothing when concatenated into output
        return ""

    def __bool__(self):
        return False

    # Python 2 truthiness hook shares the same implementation
    __nonzero__ = __bool__

+

# Shared sentinel used by Optional to tell "no default supplied" apart from
# any real default value (including None or "").
_optionalNotMatched = _NullToken()

class Optional(ParseElementEnhance):
    """
    Optional matching of the given expression.

    Parameters:
     - expr - expression that must match zero or more times
     - default (optional) - value to be returned if the optional expression is not found.

    Example::
        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
        zip.runTests('''
            # traditional ZIP code
            12345

            # ZIP+4 form
            12101-0001

            # invalid ZIP
            98765-
            ''')
    prints::
        # traditional ZIP code
        12345
        ['12345']

        # ZIP+4 form
        12101-0001
        ['12101-0001']

        # invalid ZIP
        98765-
             ^
        FAIL: Expected end of text (at char 5), (line:1, col:6)
    """
    def __init__( self, expr, default=_optionalNotMatched ):
        super(Optional,self).__init__( expr, savelist=False )
        # mirror the contained expression's list-saving behavior
        self.saveAsList = self.expr.saveAsList
        self.defaultValue = default
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        except (ParseException,IndexError):
            # contained expression did not match - substitute the default
            # (if one was supplied) instead of failing
            if self.defaultValue is not _optionalNotMatched:
                if self.expr.resultsName:
                    # keep the results name visible on the defaulted value
                    tokens = ParseResults([ self.defaultValue ])
                    tokens[self.expr.resultsName] = self.defaultValue
                else:
                    tokens = [ self.defaultValue ]
            else:
                tokens = []
        return loc, tokens

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        # lazily build and cache the "[expr]" rendering
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]"

        return self.strRepr

+

class SkipTo(ParseElementEnhance):
    """
    Token for skipping over all undefined text until the matched expression is found.

    Parameters:
     - expr - target expression marking the end of the data to be skipped
     - include - (default=C{False}) if True, the target expression is also parsed 
          (the skipped text and target expression are returned as a 2-element list).
     - ignore - (default=C{None}) used to define grammars (typically quoted strings and 
          comments) that might contain false matches to the target expression
     - failOn - (default=C{None}) define expressions that are not allowed to be 
          included in the skipped test; if found before the target expression is found, 
          the SkipTo is not a match

    Example::
        report = '''
            Outstanding Issues Report - 1 Jan 2000

               # | Severity | Description                               |  Days Open
            -----+----------+-------------------------------------------+-----------
             101 | Critical | Intermittent system crash                 |          6
              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
              79 | Minor    | System slow when running too many reports |         47
            '''
        integer = Word(nums)
        SEP = Suppress('|')
        # use SkipTo to simply match everything up until the next SEP
        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
        # - parse action will call token.strip() for each matched token, i.e., the description body
        string_data = SkipTo(SEP, ignore=quotedString)
        string_data.setParseAction(tokenMap(str.strip))
        ticket_expr = (integer("issue_num") + SEP 
                      + string_data("sev") + SEP 
                      + string_data("desc") + SEP 
                      + integer("days_open"))

        for tkt in ticket_expr.searchString(report):
            print tkt.dump()
    prints::
        ['101', 'Critical', 'Intermittent system crash', '6']
        - days_open: 6
        - desc: Intermittent system crash
        - issue_num: 101
        - sev: Critical
        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
        - days_open: 14
        - desc: Spelling error on Login ('log|n')
        - issue_num: 94
        - sev: Cosmetic
        ['79', 'Minor', 'System slow when running too many reports', '47']
        - days_open: 47
        - desc: System slow when running too many reports
        - issue_num: 79
        - sev: Minor
    """
    def __init__( self, other, include=False, ignore=None, failOn=None ):
        super( SkipTo, self ).__init__( other )
        self.ignoreExpr = ignore
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        # a string failOn is promoted to the configured literal class
        if isinstance(failOn, basestring):
            self.failOn = ParserElement._literalStringClass(failOn)
        else:
            self.failOn = failOn
        self.errmsg = "No match found for "+_ustr(self.expr)

    def parseImpl( self, instring, loc, doActions=True ):
        startloc = loc
        instrlen = len(instring)
        expr = self.expr
        # bind hot attributes to locals - the scan loop below advances one
        # character at a time in the worst case
        expr_parse = self.expr._parse
        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
        
        tmploc = loc
        while tmploc <= instrlen:
            if self_failOn_canParseNext is not None:
                # break if failOn expression matches
                # NOTE(review): this break skips the while/else raise below,
                # so a failOn hit falls through to the result-building code
                # instead of failing outright - confirm against upstream
                # pyparsing behavior
                if self_failOn_canParseNext(instring, tmploc):
                    break
                    
            if self_ignoreExpr_tryParse is not None:
                # advance past ignore expressions
                while 1:
                    try:
                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
                    except ParseBaseException:
                        break
            
            try:
                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
            except (ParseException, IndexError):
                # no match, advance loc in string
                tmploc += 1
            else:
                # matched skipto expr, done
                break

        else:
            # ran off the end of the input string without matching skipto expr, fail
            raise ParseException(instring, loc, self.errmsg, self)

        # build up return values
        loc = tmploc
        skiptext = instring[startloc:loc]
        skipresult = ParseResults(skiptext)
        
        if self.includeMatch:
            # re-parse the target expression, this time with actions enabled
            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
            skipresult += mat

        return loc, skipresult

+

class Forward(ParseElementEnhance):
    """
    Forward declaration of an expression to be defined later -
    used for recursive grammars, such as algebraic infix notation.
    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.

    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
    Specifically, '|' has a lower precedence than '<<', so that::
        fwdExpr << a | b | c
    will actually be evaluated as::
        (fwdExpr << a) | b | c
    thereby leaving b and c out as parseable alternatives.  It is recommended that you
    explicitly group the values inserted into the C{Forward}::
        fwdExpr << (a | b | c)
    Converting to use the '<<=' operator instead will avoid this problem.

    See L{ParseResults.pprint} for an example of a recursive parser created using
    C{Forward}.
    """
    def __init__( self, other=None ):
        # expr stays None until assigned via '<<' / '<<='
        super(Forward,self).__init__( other, savelist=False )

    def __lshift__( self, other ):
        # assign the deferred expression and adopt its parsing
        # characteristics (whitespace handling, list-saving, ignorables)
        if isinstance( other, basestring ):
            other = ParserElement._literalStringClass(other)
        self.expr = other
        self.strRepr = None
        self.mayIndexError = self.expr.mayIndexError
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        self.setWhitespaceChars( self.expr.whiteChars )
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        return self

    def __ilshift__(self, other):
        # '<<=' - recommended over '<<' to avoid operator-precedence surprises
        return self << other

    def leaveWhitespace( self ):
        # note: deliberately does not propagate into self.expr (which may
        # not be assigned yet)
        self.skipWhitespace = False
        return self

    def streamline( self ):
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self

    def validate( self, validateTrace=[] ):
        # guard against infinite recursion through self-referential grammars
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        # always render as "Forward: ..." - the code below this return is
        # intentionally unreachable (see comment)
        return self.__class__.__name__ + ": ..."

        # stubbed out for now - creates awful memory and perf issues
        self._revertClass = self.__class__
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)
            else:
                retString = "None"
        finally:
            self.__class__ = self._revertClass
        return self.__class__.__name__ + ": " + retString

    def copy(self):
        if self.expr is not None:
            return super(Forward,self).copy()
        else:
            # not yet assigned: return a new Forward that forwards to self,
            # so that a later '<<' on the original is seen by the copy too
            ret = Forward()
            ret <<= self
            return ret

+

class _ForwardNoRecurse(Forward):
    # Temporary stand-in class used while stringifying a Forward, so that
    # self-referential grammars render as "..." instead of recursing.
    def __str__(self):
        return "..."

+

class TokenConverter(ParseElementEnhance):
    """
    Abstract base for converters that post-process parsed tokens
    (a subclass of C{ParseElementEnhance}).
    """
    def __init__(self, expr, savelist=False):
        # savelist is accepted for signature compatibility but not used
        super(TokenConverter, self).__init__(expr)
        self.saveAsList = False

+

class Combine(TokenConverter):
    """
    Converter to concatenate all matching tokens to a single string.
    By default, the matching patterns must also be contiguous in the input string;
    this can be disabled by specifying C{'adjacent=False'} in the constructor.

    Example::
        real = Word(nums) + '.' + Word(nums)
        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
        # will also erroneously match the following
        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']

        real = Combine(Word(nums) + '.' + Word(nums))
        print(real.parseString('3.1416')) # -> ['3.1416']
        # no match when there are internal spaces
        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
    """
    def __init__( self, expr, joinString="", adjacent=True ):
        super(Combine,self).__init__( expr )
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
        self.callPreparse = True

    def ignore( self, other ):
        # in adjacent mode, do not forward ignorables into the contained
        # expression (they would break the contiguity requirement)
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super( Combine, self).ignore( other )
        return self

    def postParse( self, instring, loc, tokenlist ):
        # flatten all matched tokens into one joined string
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)

        # preserve any named sub-results by nesting when we have a name
        if self.resultsName and retToks.haskeys():
            return [ retToks ]
        else:
            return retToks

+

class Group(TokenConverter):
    """
    Converter that nests the matched tokens inside their own sub-list -
    useful for keeping the results of C{L{ZeroOrMore}} and C{L{OneOrMore}}
    repetitions distinct from surrounding tokens.

    Example::
        ident = Word(alphas)
        num = Word(nums)
        term = ident | num

        func = ident + Optional(delimitedList(term))
        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']

        func = ident + Group(Optional(delimitedList(term)))
        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
    """
    def __init__(self, expr):
        super(Group, self).__init__(expr)
        self.saveAsList = True

    def postParse(self, instring, loc, tokenlist):
        # wrap the entire token list as a single nested element
        return [tokenlist]

+

class Dict(TokenConverter):
    """
    Converter to return a repetitive expression as a list, but also as a dictionary.
    Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as a item key.

    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))

        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))

        # print attributes as plain groups
        print(OneOrMore(attr_expr).parseString(text).dump())

        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
        print(result.dump())

        # access named fields as dict entries, or output as dict
        print(result['shape'])        
        print(result.asDict())
    prints::
        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']

        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE
        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
    See more examples at L{ParseResults} of accessing fields by results name.
    """
    def __init__( self, expr ):
        super(Dict,self).__init__( expr )
        self.saveAsList = True

    def postParse( self, instring, loc, tokenlist ):
        # treat each sub-list's first token as a dict key for the remainder
        for i,tok in enumerate(tokenlist):
            if len(tok) == 0:
                continue
            ikey = tok[0]
            if isinstance(ikey,int):
                # integer keys are converted to their string form
                ikey = _ustr(tok[0]).strip()
            if len(tok)==1:
                # a bare key gets an empty-string placeholder value
                tokenlist[ikey] = _ParseResultsWithOffset("",i)
            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
                # simple key/value pair
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
            else:
                # multi-token value: store everything after the key
                dictvalue = tok.copy() #ParseResults(i)
                del dictvalue[0]
                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
                else:
                    # a single unnamed value is unwrapped from its list
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)

        if self.resultsName:
            return [ tokenlist ]
        else:
            return tokenlist

+

+

class Suppress(TokenConverter):
    """
    Converter that matches its contained expression but contributes no
    tokens to the output - handy for delimiters that matter during parsing
    but are just noise in the results.

    Example::
        source = "a, b, c,d"
        wd = Word(alphas)
        print((wd + ZeroOrMore(',' + wd)).parseString(source))
        # -> ['a', ',', 'b', ',', 'c', ',', 'd']
        print((wd + ZeroOrMore(Suppress(',') + wd)).parseString(source))
        # -> ['a', 'b', 'c', 'd']
    (See also L{delimitedList}.)
    """
    def postParse(self, instring, loc, tokenlist):
        # discard whatever the contained expression matched
        return []

    def suppress(self):
        # already suppressed - suppressing again is a no-op
        return self

+

+

class OnlyOnce(object):
    """
    Wrapper for parse actions that ensures the wrapped callable runs at
    most once; later invocations raise ParseException until reset() is
    called.
    """
    def __init__(self, methodCall):
        self.callable = _trim_arity(methodCall)
        self.called = False

    def __call__(self, s, l, t):
        # guard clause: a second successful call is an error
        if self.called:
            raise ParseException(s, l, "")
        results = self.callable(s, l, t)
        # mark as consumed only after the callable succeeds, so a raising
        # action may be retried
        self.called = True
        return results

    def reset(self):
        """Re-arm the wrapper so the action may fire once more."""
        self.called = False

+

def traceParseAction(f):
    """
    Decorator for debugging parse actions. 
    
    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.

    Example::
        wd = Word(alphas)

        @traceParseAction
        def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))

        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
    prints::
        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
        <<leaving remove_duplicate_chars (ret: 'dfjkls')
        ['dfjkls']
    """
    # normalize the action to the standard (s, l, t) arity
    f = _trim_arity(f)
    def z(*paArgs):
        thisFunc = f.__name__
        # the trailing three args are always (instring, loc, tokens);
        # a fourth leading arg means the action is a bound method
        s,l,t = paArgs[-3:]
        if len(paArgs)>3:
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
        try:
            ret = f(*paArgs)
        except Exception as exc:
            # log the exception, then let it propagate unchanged
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
        return ret
    try:
        # preserve the wrapped function's name for nicer trace output
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z

+

+#

+# global helpers

+#

+def delimitedList( expr, delim=",", combine=False ):

+    """

+    Helper to define a delimited list of expressions - the delimiter defaults to ','.

+    By default, the list elements and delimiters can have intervening whitespace, and

+    comments, but this can be overridden by passing C{combine=True} in the constructor.

+    If C{combine} is set to C{True}, the matching tokens are returned as a single token

+    string, with the delimiters included; otherwise, the matching tokens are returned

+    as a list of tokens, with the delimiters suppressed.

+

+    Example::

+        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']

+        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']

+    """

+    # display name of the form "expr [, expr]..." used in error messages and repr

+    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."

+    if combine:

+        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)

+    else:

+        return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)

+

+def countedArray( expr, intExpr=None ):

+    """

+    Helper to define a counted list of expressions.

+    This helper defines a pattern of the form::

+        integer expr expr expr...

+    where the leading integer tells how many expr expressions follow.

+    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.

+    

+    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.

+

+    Example::

+        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']

+

+        # in this parser, the leading integer value is given in binary,

+        # '10' indicating that 2 values are in the array

+        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))

+        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']

+    """

+    # placeholder; filled in at parse time once the count has been read

+    arrayExpr = Forward()

+    def countFieldParseAction(s,l,t):

+        # rebind arrayExpr to exactly n copies of expr (or to empty when n == 0)

+        n = t[0]

+        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))

+        # suppress the count token from the results

+        return []

+    if intExpr is None:

+        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))

+    else:

+        # copy so the caller's expression is not mutated by setName/addParseAction below

+        intExpr = intExpr.copy()

+    intExpr.setName("arrayLen")

+    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)

+    return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')

+

+def _flatten(L):

+    """Recursively flatten arbitrarily nested lists into a single flat list."""

+    ret = []

+    for i in L:

+        if isinstance(i,list):

+            ret.extend(_flatten(i))

+        else:

+            ret.append(i)

+    return ret

+

+def matchPreviousLiteral(expr):

+    """

+    Helper to define an expression that is indirectly defined from

+    the tokens matched in a previous expression, that is, it looks

+    for a 'repeat' of a previous expression.  For example::

+        first = Word(nums)

+        second = matchPreviousLiteral(first)

+        matchExpr = first + ":" + second

+    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a

+    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.

+    If this is not desired, use C{matchPreviousExpr}.

+    Do I{not} use with packrat parsing enabled.

+    """

+    # placeholder; rebound each time expr matches, to the literal text it matched

+    rep = Forward()

+    def copyTokenToRepeater(s,l,t):

+        if t:

+            if len(t) == 1:

+                rep << t[0]

+            else:

+                # flatten t tokens

+                tflat = _flatten(t.asList())

+                rep << And(Literal(tt) for tt in tflat)

+        else:

+            # expr matched zero tokens, so the repeat matches empty as well

+            rep << Empty()

+    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)

+    rep.setName('(prev) ' + _ustr(expr))

+    return rep

+

+def matchPreviousExpr(expr):

+    """

+    Helper to define an expression that is indirectly defined from

+    the tokens matched in a previous expression, that is, it looks

+    for a 'repeat' of a previous expression.  For example::

+        first = Word(nums)

+        second = matchPreviousExpr(first)

+        matchExpr = first + ":" + second

+    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by

+    expressions, will I{not} match the leading C{"1:1"} in C{"1:10"};

+    the expressions are evaluated first, and then compared, so

+    C{"1"} is compared with C{"10"}.

+    Do I{not} use with packrat parsing enabled.

+    """

+    rep = Forward()

+    # reparse with a copy of expr, then compare its tokens against the first match

+    e2 = expr.copy()

+    rep <<= e2

+    def copyTokenToRepeater(s,l,t):

+        matchTokens = _flatten(t.asList())

+        def mustMatchTheseTokens(s,l,t):

+            theseTokens = _flatten(t.asList())

+            if  theseTokens != matchTokens:

+                raise ParseException("",0,"")

+        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )

+    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)

+    rep.setName('(prev) ' + _ustr(expr))

+    return rep

+

+def _escapeRegexRangeChars(s):

+    """Escape characters that are special inside a regex [] character class."""

+    #~  escape these chars: ^-]

+    for c in r"\^-]":

+        s = s.replace(c,_bslash+c)

+    s = s.replace("\n",r"\n")

+    s = s.replace("\t",r"\t")

+    return _ustr(s)

+

+def oneOf( strs, caseless=False, useRegex=True ):

+    """

+    Helper to quickly define a set of alternative Literals, and makes sure to do

+    longest-first testing when there is a conflict, regardless of the input order,

+    but returns a C{L{MatchFirst}} for best performance.

+

+    Parameters:

+     - strs - a string of space-delimited literals, or a collection of string literals

+     - caseless - (default=C{False}) - treat all literals as caseless

+     - useRegex - (default=C{True}) - as an optimization, will generate a Regex

+          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or

+          if creating a C{Regex} raises an exception)

+

+    Example::

+        comp_oper = oneOf("< = > <= >= !=")

+        var = Word(alphas)

+        number = Word(nums)

+        term = var | number

+        comparison_expr = term + comp_oper + term

+        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))

+    prints::

+        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]

+    """

+    # comparison helpers: isequal tests symbol equality, masks tests whether the

+    # first symbol is a prefix of (and would therefore shadow) the second

+    if caseless:

+        isequal = ( lambda a,b: a.upper() == b.upper() )

+        masks = ( lambda a,b: b.upper().startswith(a.upper()) )

+        parseElementClass = CaselessLiteral

+    else:

+        isequal = ( lambda a,b: a == b )

+        masks = ( lambda a,b: b.startswith(a) )

+        parseElementClass = Literal

+

+    symbols = []

+    if isinstance(strs,basestring):

+        symbols = strs.split()

+    # NOTE(review): collections.Iterable is removed in Python 3.10+; the Py3 home is

+    # collections.abc.Iterable.  Left as-is here because this vendored copy still

+    # declares Python 2 support, where collections.abc does not exist.

+    elif isinstance(strs, collections.Iterable):

+        symbols = list(strs)

+    else:

+        warnings.warn("Invalid argument to oneOf, expected string or iterable",

+                SyntaxWarning, stacklevel=2)

+    if not symbols:

+        return NoMatch()

+

+    # reorder symbols longest-first: drop duplicates, and move any symbol that is

+    # masked by an earlier prefix ahead of that prefix so it can still match

+    i = 0

+    while i < len(symbols)-1:

+        cur = symbols[i]

+        for j,other in enumerate(symbols[i+1:]):

+            if ( isequal(other, cur) ):

+                del symbols[i+j+1]

+                break

+            elif ( masks(cur, other) ):

+                del symbols[i+j+1]

+                symbols.insert(i,other)

+                cur = other

+                break

+        else:

+            i += 1

+

+    if not caseless and useRegex:

+        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))

+        try:

+            # all symbols single characters -> a compact [..] character class;

+            # otherwise an escaped alternation

+            if len(symbols)==len("".join(symbols)):

+                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))

+            else:

+                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))

+        except Exception:

+            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",

+                    SyntaxWarning, stacklevel=2)

+

+

+    # last resort, just use MatchFirst

+    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))

+

+def dictOf( key, value ):

+    """

+    Helper to easily and clearly define a dictionary by specifying the respective patterns

+    for the key and value.  Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens

+    in the proper order.  The key pattern can include delimiting markers or punctuation,

+    as long as they are suppressed, thereby leaving the significant key text.  The value

+    pattern can include named results, so that the C{Dict} results can include named token

+    fields.

+

+    Example::

+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"

+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))

+        print(OneOrMore(attr_expr).parseString(text).dump())

+        

+        attr_label = label

+        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)

+

+        # similar to Dict, but simpler call format

+        result = dictOf(attr_label, attr_value).parseString(text)

+        print(result.dump())

+        print(result['shape'])

+        print(result.shape)  # object attribute access works too

+        print(result.asDict())

+    prints::

+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]

+        - color: light blue

+        - posn: upper left

+        - shape: SQUARE

+        - texture: burlap

+        SQUARE

+        SQUARE

+        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}

+    """

+    # each Group([key, value]) pair becomes one Dict entry

+    return Dict( ZeroOrMore( Group ( key + value ) ) )

+

+def originalTextFor(expr, asString=True):

+    """

+    Helper to return the original, untokenized text for a given expression.  Useful to

+    restore the parsed fields of an HTML start tag into the raw tag text itself, or to

+    revert separate tokens with intervening whitespace back to the original matching

+    input text. By default, returns a string containing the original parsed text.  

+       

+    If the optional C{asString} argument is passed as C{False}, then the return value is a 

+    C{L{ParseResults}} containing any results names that were originally matched, and a 

+    single token containing the original matched text from the input string.  So if 

+    the expression passed to C{L{originalTextFor}} contains expressions with defined

+    results names, you must set C{asString} to C{False} if you want to preserve those

+    results name values.

+

+    Example::

+        src = "this is test <b> bold <i>text</i> </b> normal text "

+        for tag in ("b","i"):

+            opener,closer = makeHTMLTags(tag)

+            patt = originalTextFor(opener + SkipTo(closer) + closer)

+            print(patt.searchString(src)[0])

+    prints::

+        ['<b> bold <i>text</i> </b>']

+        ['<i>text</i>']

+    """

+    # bracket expr with zero-width markers recording the start and end locations

+    locMarker = Empty().setParseAction(lambda s,loc,t: loc)

+    endlocMarker = locMarker.copy()

+    endlocMarker.callPreparse = False

+    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")

+    if asString:

+        extractText = lambda s,l,t: s[t._original_start:t._original_end]

+    else:

+        def extractText(s,l,t):

+            # replace all tokens with the raw slice, popping the marker names out

+            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]

+    matchExpr.setParseAction(extractText)

+    matchExpr.ignoreExprs = expr.ignoreExprs

+    return matchExpr

+

+def ungroup(expr): 

+    """

+    Helper to undo pyparsing's default grouping of And expressions, even

+    if all but one are non-empty.

+    """

+    # the parse action lifts the single grouped result up one level

+    return TokenConverter(expr).setParseAction(lambda t:t[0])

+

+def locatedExpr(expr):

+    """

+    Helper to decorate a returned token with its starting and ending locations in the input string.

+    This helper adds the following results names:

+     - locn_start = location where matched expression begins

+     - locn_end = location where matched expression ends

+     - value = the actual parsed results

+

+    Be careful if the input text contains C{<TAB>} characters, you may want to call

+    C{L{ParserElement.parseWithTabs}}

+

+    Example::

+        wd = Word(alphas)

+        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):

+            print(match)

+    prints::

+        [[0, 'ljsdf', 5]]

+        [[8, 'lksdjjf', 15]]

+        [[18, 'lkkjj', 23]]

+    """

+    # zero-width marker whose parse action yields the current location

+    locator = Empty().setParseAction(lambda s,l,t: l)

+    # end marker uses leaveWhitespace so locn_end is the position right after the match

+    return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))

+

+

+# convenience constants for positional expressions

+empty       = Empty().setName("empty")

+lineStart   = LineStart().setName("lineStart")

+lineEnd     = LineEnd().setName("lineEnd")

+stringStart = StringStart().setName("stringStart")

+stringEnd   = StringEnd().setName("stringEnd")

+

+# internal expressions used by srange() to parse regex-style [] character-set strings:

+# escaped punctuation, \x.. hex escapes, \0.. octal escapes, bare characters, and a-b ranges

+_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])

+_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))

+_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))

+_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE)

+_charRange = Group(_singleChar + Suppress("-") + _singleChar)

+_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"

+

+def srange(s):

+    r"""

+    Helper to easily define string ranges for use in Word construction.  Borrows

+    syntax from regexp '[]' string range definitions::

+        srange("[0-9]")   -> "0123456789"

+        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"

+        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"

+    The input string must be enclosed in []'s, and the returned string is the expanded

+    character set joined into a single string.

+    The values enclosed in the []'s may be:

+     - a single character

+     - an escaped character with a leading backslash (such as C{\-} or C{\]})

+     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) 

+         (C{\0x##} is also supported for backwards compatibility) 

+     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)

+     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)

+     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)

+    """

+    # single chars pass through; a ParseResults pair [lo, hi] expands to every char between

+    _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))

+    try:

+        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)

+    except Exception:

+        # malformed range spec -> empty set, mirroring historical behavior

+        return ""

+

+def matchOnlyAtCol(n):

+    """

+    Helper method for defining parse actions that require matching at a specific

+    column in the input text.

+    """

+    def verifyCol(strg,locn,toks):

+        # reject the match unless it begins at column n (1-based)

+        if col(locn,strg) != n:

+            raise ParseException(strg,locn,"matched token not at column %d" % n)

+    return verifyCol

+

+def replaceWith(replStr):

+    """

+    Helper method for common parse actions that simply return a literal value.  Especially

+    useful when used with C{L{transformString<ParserElement.transformString>}()}.

+

+    Example::

+        num = Word(nums).setParseAction(lambda toks: int(toks[0]))

+        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))

+        term = na | num

+        

+        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]

+    """

+    # parse action ignores the matched tokens and always yields [replStr]

+    return lambda s,l,t: [replStr]

+

+def removeQuotes(s,l,t):

+    """

+    Helper parse action for removing quotation marks from parsed quoted strings.

+

+    Example::

+        # by default, quotation marks are included in parsed results

+        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]

+

+        # use removeQuotes to strip quotation marks from parsed results

+        quotedString.setParseAction(removeQuotes)

+        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]

+    """

+    # strip the first and last characters (the enclosing quote marks)

+    return t[0][1:-1]

+

+def tokenMap(func, *args):

+    """

+    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional 

+    args are passed, they are forwarded to the given function as additional arguments after

+    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the

+    parsed data to an integer using base 16.

+

+    Example (compare the last to example in L{ParserElement.transformString})::

+        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))

+        hex_ints.runTests('''

+            00 11 22 aa FF 0a 0d 1a

+            ''')

+        

+        upperword = Word(alphas).setParseAction(tokenMap(str.upper))

+        OneOrMore(upperword).runTests('''

+            my kingdom for a horse

+            ''')

+

+        wd = Word(alphas).setParseAction(tokenMap(str.title))

+        OneOrMore(wd).setParseAction(' '.join).runTests('''

+            now is the winter of our discontent made glorious summer by this sun of york

+            ''')

+    prints::

+        00 11 22 aa FF 0a 0d 1a

+        [0, 17, 34, 170, 255, 10, 13, 26]

+

+        my kingdom for a horse

+        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']

+

+        now is the winter of our discontent made glorious summer by this sun of york

+        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']

+    """

+    def pa(s,l,t):

+        # apply func (with any extra args) to every token in the results

+        return [func(tokn, *args) for tokn in t]

+

+    # give the parse action a helpful name for trace/debug output

+    try:

+        func_name = getattr(func, '__name__', 

+                            getattr(func, '__class__').__name__)

+    except Exception:

+        func_name = str(func)

+    pa.__name__ = func_name

+

+    return pa

+

+# ready-made per-token case-conversion parse actions built via tokenMap

+upcaseTokens = tokenMap(lambda t: _ustr(t).upper())

+"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""

+

+downcaseTokens = tokenMap(lambda t: _ustr(t).lower())

+"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""

+    

+def _makeTags(tagStr, xml):

+    """Internal helper to construct opening and closing tag expressions, given a tag name"""

+    if isinstance(tagStr,basestring):

+        resname = tagStr

+        # HTML tags match caselessly; XML tags match exact case

+        tagStr = Keyword(tagStr, caseless=not xml)

+    else:

+        resname = tagStr.name

+

+    tagAttrName = Word(alphas,alphanums+"_-:")

+    if (xml):

+        # XML: attribute values must be double-quoted, attributes always have values

+        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )

+        openTag = Suppress("<") + tagStr("tag") + \

+                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \

+                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")

+    else:

+        # HTML: values may be quoted or bare, attributes may be value-less,

+        # and attribute names are lowercased in the results

+        printablesLessRAbrack = "".join(c for c in printables if c not in ">")

+        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)

+        openTag = Suppress("<") + tagStr("tag") + \

+                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \

+                Optional( Suppress("=") + tagAttrValue ) ))) + \

+                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")

+    closeTag = Combine(_L("</") + tagStr + ">")

+

+    # results names like "startDiv"/"endDiv", built from the title-cased tag name

+    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)

+    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)

+    openTag.tag = resname

+    closeTag.tag = resname

+    return openTag, closeTag

+

+def makeHTMLTags(tagStr):

+    """

+    Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches

+    tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.

+

+    Example::

+        text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'

+        # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple

+        a,a_end = makeHTMLTags("A")

+        link_expr = a + SkipTo(a_end)("link_text") + a_end

+        

+        for link in link_expr.searchString(text):

+            # attributes in the <A> tag (like "href" shown here) are also accessible as named results

+            print(link.link_text, '->', link.href)

+    prints::

+        pyparsing -> http://pyparsing.wikispaces.com

+    """

+    # xml=False -> caseless tag matching, lenient attribute syntax

+    return _makeTags( tagStr, False )

+

+def makeXMLTags(tagStr):

+    """

+    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches

+    tags only in the given upper/lower case.

+

+    Example: similar to L{makeHTMLTags}

+    """

+    # xml=True -> case-sensitive tag matching, strict attribute syntax

+    return _makeTags( tagStr, True )

+

+def withAttribute(*args,**attrDict):

+    """

+    Helper to create a validating parse action to be used with start tags created

+    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag

+    with a required attribute value, to avoid false matches on common tags such as

+    C{<TD>} or C{<DIV>}.

+

+    Call C{withAttribute} with a series of attribute names and values. Specify the list

+    of filter attributes names and values as:

+     - keyword arguments, as in C{(align="right")}, or

+     - as an explicit dict with C{**} operator, when an attribute name is also a Python

+          reserved word, as in C{**{"class":"Customer", "align":"right"}}

+     - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )

+    For attribute names with a namespace prefix, you must use the second form.  Attribute

+    names are matched insensitive to upper/lower case.

+       

+    If just testing for C{class} (with or without a namespace), use C{L{withClass}}.

+

+    To verify that the attribute exists, but without specifying a value, pass

+    C{withAttribute.ANY_VALUE} as the value.

+

+    Example::

+        html = '''

+            <div>

+            Some text

+            <div type="grid">1 4 0 1 0</div>

+            <div type="graph">1,3 2,3 1,1</div>

+            <div>this has no type</div>

+            </div>

+                

+        '''

+        div,div_end = makeHTMLTags("div")

+

+        # only match div tag having a type attribute with value "grid"

+        div_grid = div().setParseAction(withAttribute(type="grid"))

+        grid_expr = div_grid + SkipTo(div | div_end)("body")

+        for grid_header in grid_expr.searchString(html):

+            print(grid_header.body)

+        

+        # construct a match with any div tag having a type attribute, regardless of the value

+        div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))

+        div_expr = div_any_type + SkipTo(div | div_end)("body")

+        for div_header in div_expr.searchString(html):

+            print(div_header.body)

+    prints::

+        1 4 0 1 0

+

+        1 4 0 1 0

+        1,3 2,3 1,1

+    """

+    # positional (name, value) tuples take precedence over keyword arguments

+    if args:

+        attrs = args[:]

+    else:

+        attrs = attrDict.items()

+    attrs = [(k,v) for k,v in attrs]

+    def pa(s,l,tokens):

+        # validate every required attribute on the matched start-tag tokens

+        for attrName,attrValue in attrs:

+            if attrName not in tokens:

+                raise ParseException(s,l,"no matching attribute " + attrName)

+            # ANY_VALUE sentinel means "present with any value"

+            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:

+                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %

+                                            (attrName, tokens[attrName], attrValue))

+    return pa

+# sentinel: require the attribute to exist without constraining its value

+withAttribute.ANY_VALUE = object()

+

+def withClass(classname, namespace=''):

+    """

+    Simplified version of C{L{withAttribute}} when matching on a div class - made

+    difficult because C{class} is a reserved word in Python.

+

+    Example::

+        html = '''

+            <div>

+            Some text

+            <div class="grid">1 4 0 1 0</div>

+            <div class="graph">1,3 2,3 1,1</div>

+            <div>this &lt;div&gt; has no class</div>

+            </div>

+                

+        '''

+        div,div_end = makeHTMLTags("div")

+        div_grid = div().setParseAction(withClass("grid"))

+        

+        grid_expr = div_grid + SkipTo(div | div_end)("body")

+        for grid_header in grid_expr.searchString(html):

+            print(grid_header.body)

+        

+        div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))

+        div_expr = div_any_type + SkipTo(div | div_end)("body")

+        for div_header in div_expr.searchString(html):

+            print(div_header.body)

+    prints::

+        1 4 0 1 0

+

+        1 4 0 1 0

+        1,3 2,3 1,1

+    """

+    # delegate to withAttribute, building the (possibly namespaced) attribute name

+    classattr = "%s:class" % namespace if namespace else "class"

+    return withAttribute(**{classattr : classname})        

+

+# associativity sentinels used in infixNotation operator definitions

+opAssoc = _Constants()

+opAssoc.LEFT = object()

+opAssoc.RIGHT = object()

+

+def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):

+    """

+    Helper method for constructing grammars of expressions made up of

+    operators working in a precedence hierarchy.  Operators may be unary or

+    binary, left- or right-associative.  Parse actions can also be attached

+    to operator expressions. The generated parser will also recognize the use 

+    of parentheses to override operator precedences (see example below).

+    

+    Note: if you define a deep operator list, you may see performance issues

+    when using infixNotation. See L{ParserElement.enablePackrat} for a

+    mechanism to potentially improve your parser performance.

+

+    Parameters:

+     - baseExpr - expression representing the most basic element for the nested

+     - opList - list of tuples, one for each operator precedence level in the

+      expression grammar; each tuple is of the form

+      (opExpr, numTerms, rightLeftAssoc, parseAction), where:

+       - opExpr is the pyparsing expression for the operator;

+          may also be a string, which will be converted to a Literal;

+          if numTerms is 3, opExpr is a tuple of two expressions, for the

+          two operators separating the 3 terms

+       - numTerms is the number of terms for this operator (must

+          be 1, 2, or 3)

+       - rightLeftAssoc is the indicator whether the operator is

+          right or left associative, using the pyparsing-defined

+          constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.

+       - parseAction is the parse action to be associated with

+          expressions matching this operator expression (the

+          parse action tuple member may be omitted)

+     - lpar - expression for matching left-parentheses (default=C{Suppress('(')})

+     - rpar - expression for matching right-parentheses (default=C{Suppress(')')})

+

+    Example::

+        # simple example of four-function arithmetic with ints and variable names

+        integer = pyparsing_common.signed_integer

+        varname = pyparsing_common.identifier 

+        

+        arith_expr = infixNotation(integer | varname,

+            [

+            ('-', 1, opAssoc.RIGHT),

+            (oneOf('* /'), 2, opAssoc.LEFT),

+            (oneOf('+ -'), 2, opAssoc.LEFT),

+            ])

+        

+        arith_expr.runTests('''

+            5+3*6

+            (5+3)*6

+            -2--11

+            ''', fullDump=False)

+    prints::

+        5+3*6

+        [[5, '+', [3, '*', 6]]]

+

+        (5+3)*6

+        [[[5, '+', 3], '*', 6]]

+

+        -2--11

+        [[['-', 2], '-', ['-', 11]]]

+    """

+    ret = Forward()

+    # the tightest-binding level: a base element, or a parenthesized full expression

+    lastExpr = baseExpr | ( lpar + ret + rpar )

+    # build levels from highest to lowest precedence; each level's expression

+    # falls through to the previous (tighter) level when no operator matches

+    for i,operDef in enumerate(opList):

+        # pad with None so a 3-tuple (no parse action) unpacks cleanly

+        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]

+        termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr

+        if arity == 3:

+            if opExpr is None or len(opExpr) != 2:

+                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")

+            opExpr1, opExpr2 = opExpr

+        thisExpr = Forward().setName(termName)

+        # the FollowedBy lookaheads below fail fast before committing to a Group

+        if rightLeftAssoc == opAssoc.LEFT:

+            if arity == 1:

+                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )

+            elif arity == 2:

+                if opExpr is not None:

+                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )

+                else:

+                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )

+            elif arity == 3:

+                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \

+                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )

+            else:

+                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")

+        elif rightLeftAssoc == opAssoc.RIGHT:

+            if arity == 1:

+                # try to avoid LR with this extra test

+                if not isinstance(opExpr, Optional):

+                    opExpr = Optional(opExpr)

+                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )

+            elif arity == 2:

+                if opExpr is not None:

+                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )

+                else:

+                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )

+            elif arity == 3:

+                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \

+                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )

+            else:

+                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")

+        else:

+            raise ValueError("operator must indicate right or left associativity")

+        if pa:

+            matchExpr.setParseAction( pa )

+        thisExpr <<= ( matchExpr.setName(termName) | lastExpr )

+        lastExpr = thisExpr

+    # the loosest-binding (last) level is the whole expression

+    ret <<= lastExpr

+    return ret

+

+operatorPrecedence = infixNotation

+"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""

+

+# predefined quoted-string expressions; the regex matches the body and opening quote,

+# and the closing quote is appended as a separate literal via Combine

+dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")

+sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")

+quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|

+                       Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")

+unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")

+

def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
    """
    Helper method for defining nested lists enclosed in opening and closing
    delimiters ("(" and ")" are the default).

    Parameters:
     - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
     - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
     - content - expression for items within the nested lists (default=C{None})
     - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})

    If an expression is not provided for the content argument, the nested
    expression will capture all whitespace-delimited content between delimiters
    as a list of separate values.

    Use the C{ignoreExpr} argument to define expressions that may contain
    opening or closing characters that should not be treated as opening
    or closing characters for nesting, such as quotedString or a comment
    expression.  Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
    The default is L{quotedString}, but if no expressions are to be ignored,
    then pass C{None} for this argument.

    Example::
        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
    """
    # NOTE: the ignoreExpr default is evaluated once at definition time;
    # copy() isolates it from later modifications to the shared quotedString.
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # No content expression supplied - synthesize one that captures runs
        # of characters that are neither whitespace nor delimiters.
        if isinstance(opener,basestring) and isinstance(closer,basestring):
            if len(opener) == 1 and len(closer)==1:
                # Single-character delimiters can simply be excluded from the
                # allowed character set via CharsNotIn.
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
                                ).setParseAction(lambda t:t[0].strip()))
            else:
                # Multi-character delimiters must be rejected with negative
                # lookahead (~Literal), one character of content at a time.
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr + 
                                    ~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    # Forward lets the expression refer to itself, giving arbitrary nesting.
    ret = Forward()
    if ignoreExpr is not None:
        ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
    else:
        ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content )  + Suppress(closer) )
    ret.setName('nested %s%s expression' % (opener,closer))
    return ret

+

def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """
    Helper method for defining space-delimited indentation blocks, such as
    those used to define block statements in Python source code.

    Parameters:
     - blockStatementExpr - expression defining syntax of statement that
            is repeated within the indented block
     - indentStack - list created by caller to manage indentation stack
            (multiple statementWithIndentedBlock expressions within a single grammar
            should share a common indentStack)
     - indent - boolean indicating whether block must be indented beyond the
            the current level; set to False for block of left-most statements
            (default=C{True})

    A valid block must contain at least one C{blockStatement}.
    """
    # The three parse actions below compare the column of the current parse
    # location against the top of the caller-supplied indentStack, classifying
    # each line as a peer statement, a deeper sub-block, or an un-indent.

    def checkPeerIndent(s,l,t):
        # End-of-string: nothing left to check.
        if l >= len(s): return
        curCol = col(l,s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                # Deeper than the current block where a peer was expected.
                raise ParseFatalException(s,l,"illegal nesting")
            raise ParseException(s,l,"not a peer entry")

    def checkSubIndent(s,l,t):
        curCol = col(l,s)
        if curCol > indentStack[-1]:
            # Entering a more deeply indented block; remember its column.
            indentStack.append( curCol )
        else:
            raise ParseException(s,l,"not a subentry")

    def checkUnindent(s,l,t):
        if l >= len(s): return
        curCol = col(l,s)
        # A valid un-indent must step back to (at most) the enclosing level.
        # NOTE(review): indentStack[-2] assumes at least two entries are on
        # the stack here -- a lone entry would raise IndexError; confirm
        # callers always seed the stack before the first UNDENT fires.
        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
            raise ParseException(s,l,"not an unindent")
        indentStack.pop()

    # Consume line endings without letting leading tabs/spaces be skipped as
    # ordinary whitespace (column positions must stay meaningful).
    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
    PEER   = Empty().setParseAction(checkPeerIndent).setName('')
    UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
    if indent:
        smExpr = Group( Optional(NL) +
            #~ FollowedBy(blockStatementExpr) +
            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
    else:
        # Left-most block: no INDENT/UNDENT bracketing, peers only.
        smExpr = Group( Optional(NL) +
            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
    # Allow statements to continue across lines via backslash-newline.
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr.setName('indented block')

+

# Extended (Latin-1) character classes for 8-bit alphabetic and punctuation
# characters, built with srange from hex code-point ranges.
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")

# Generic matchers for any HTML open/close tag.
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
# Maps common entity names ("gt", "lt", ...) to their literal characters.
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
    """Helper parser action to replace common HTML entities with their special characters"""
    # Returns None for an unknown entity name (dict.get default).
    return _htmlEntityMap.get(t.entity)

# it's easy to get these comment structures wrong - they're very common, so may as well make them available
# The /* ... */ body regex accepts any non-star character, or a star not
# followed by a slash, so the comment ends at the first */.
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form C{/* ... */}"

htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form C{<!-- ... -->}"

restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
# A // comment may be continued onto the next line with backslash-newline.
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form C{// ... (to end of line)}"

cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"

javaStyleComment = cppStyleComment
"Same as C{L{cppStyleComment}}"

pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form C{# ... (to end of line)}"

# One item in a comma-separated list: printable words (no commas), possibly
# separated by spaces/tabs, stopping before a comma or end of line.
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
                                  Optional( Word(" \t") +
                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
   This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""

+

+# some other useful expressions - using lower-case class name since we are really using this as a namespace

class pyparsing_common:
    """
    Here are some common low-level expressions that may be useful in jump-starting parser development:
     - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
     - common L{programming identifiers<identifier>}
     - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
     - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
     - L{UUID<uuid>}
     - L{comma-separated list<comma_separated_list>}
    Parse actions:
     - C{L{convertToInteger}}
     - C{L{convertToFloat}}
     - C{L{convertToDate}}
     - C{L{convertToDatetime}}
     - C{L{stripHTMLTags}}
     - C{L{upcaseTokens}}
     - C{L{downcaseTokens}}

    Example::
        pyparsing_common.number.runTests('''
            100
            -100
            3.14159
            6.02e23
            ''')
    """

    convertToInteger = tokenMap(int)
    """
    Parse action for converting parsed integers to Python int
    """

    convertToFloat = tokenMap(float)
    """
    Parse action for converting parsed numbers to Python float
    """

    integer = Word(nums).setName("integer").setParseAction(convertToInteger)
    """expression that parses an unsigned integer, returns an int"""

    hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
    """expression that parses a hexadecimal integer, returns an int"""

    signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
    """expression that parses an integer with optional leading sign, returns an int"""

    # NOTE(review): signed_integer() appears to make copies so that replacing
    # their parse action with convertToFloat does not clobber signed_integer's
    # own int conversion -- confirm against ParserElement.__call__.
    fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
    """fractional expression of an integer divided by an integer, returns a float"""
    fraction.addParseAction(lambda t: t[0]/t[-1])

    mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
    """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
    # Summing the tokens yields e.g. 1 + 3/4 for "1-3/4".
    mixed_integer.addParseAction(sum)

    real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
    """expression that parses a floating point number and returns a float"""

    sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
    """expression that parses a floating point number with optional scientific notation and returns a float"""

    # streamlining this expression makes the docs nicer-looking
    # Order matters: the most specific forms must be tried first.
    number = (sci_real | real | signed_integer).streamline()
    """any numeric expression, returns the corresponding Python type"""

    fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
    """any int or real number, returned as float"""

    identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
    """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""

    ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
    "IPv4 address (C{0.0.0.0 - 255.255.255.255})"

    # IPv6 building blocks (private): a 1-4 digit hex group, the 8-group
    # long form, the '::'-abbreviated short form, and the IPv4-mapped form.
    _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
    _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
    _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
    # A short-form address must contain fewer than 8 hex groups in total.
    _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
    _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
    ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
    "IPv6 address (long, short, or mixed form)"

    # The \1 backreference forces the same delimiter (:, ., or -) throughout.
    mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
    "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"

    @staticmethod
    def convertToDate(fmt="%Y-%m-%d"):
        """
        Helper to create a parse action for converting parsed date string to Python datetime.date

        Params -
         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})

        Example::
            date_expr = pyparsing_common.iso8601_date.copy()
            date_expr.setParseAction(pyparsing_common.convertToDate())
            print(date_expr.parseString("1999-12-31"))
        prints::
            [datetime.date(1999, 12, 31)]
        """
        def cvt_fn(s,l,t):
            try:
                return datetime.strptime(t[0], fmt).date()
            except ValueError as ve:
                # Surface the conversion failure as a parse error.
                raise ParseException(s, l, str(ve))
        return cvt_fn

    @staticmethod
    def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
        """
        Helper to create a parse action for converting parsed datetime string to Python datetime.datetime

        Params -
         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})

        Example::
            dt_expr = pyparsing_common.iso8601_datetime.copy()
            dt_expr.setParseAction(pyparsing_common.convertToDatetime())
            print(dt_expr.parseString("1999-12-31T23:59:59.999"))
        prints::
            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
        """
        def cvt_fn(s,l,t):
            try:
                return datetime.strptime(t[0], fmt)
            except ValueError as ve:
                # Surface the conversion failure as a parse error.
                raise ParseException(s, l, str(ve))
        return cvt_fn

    iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
    "ISO8601 date (C{yyyy-mm-dd})"

    iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
    "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"

    uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
    "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"

    # Suppressed matcher for any HTML tag, used by stripHTMLTags below.
    _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
    @staticmethod
    def stripHTMLTags(s, l, tokens):
        """
        Parse action to remove HTML tags from web page HTML source

        Example::
            # strip HTML links from normal text 
            text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
            td,td_end = makeHTMLTags("TD")
            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end

            print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
        """
        return pyparsing_common._html_stripper.transformString(tokens[0])

    _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') 
                                        + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
    comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""

    upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
    """Parse action to convert tokens to upper case."""

    downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
    """Parse action to convert tokens to lower case."""

+

+

if __name__ == "__main__":

    # Self-test/demo: a miniature SQL SELECT grammar, exercised via runTests
    # along with the predefined pyparsing_common numeric expressions.
    selectToken    = CaselessLiteral("select")
    fromToken      = CaselessLiteral("from")

    ident          = Word(alphas, alphanums + "_$")

    # Dotted column/table names are combined into a single upper-cased token.
    columnName     = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
    columnNameList = Group(delimitedList(columnName)).setName("columns")
    columnSpec     = ('*' | columnNameList)

    tableName      = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
    tableNameList  = Group(delimitedList(tableName)).setName("tables")

    simpleSQL      = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")

    # demo runTests method, including embedded comments in test string
    simpleSQL.runTests("""
        # '*' as column list and dotted table name
        select * from SYS.XYZZY

        # caseless match on "SELECT", and casts back to "select"
        SELECT * from XYZZY, ABC

        # list of column names, and mixed case SELECT keyword
        Select AA,BB,CC from Sys.dual

        # multiple tables
        Select A, B, C from Sys.dual, Table2

        # invalid SELECT keyword - should fail
        Xelect A, B, C from Sys.dual

        # incomplete command - should fail
        Select

        # invalid column name - should fail
        Select ^^^ frox Sys.dual

        """)

    # any int or real number, returned as the matching Python type
    pyparsing_common.number.runTests("""
        100
        -100
        +100
        3.14159
        6.02e23
        1e-12
        """)

    # any int or real number, returned as float
    pyparsing_common.fnumber.runTests("""
        100
        -100
        +100
        3.14159
        6.02e23
        1e-12
        """)

    # hex numbers, returned as ints
    pyparsing_common.hex_integer.runTests("""
        100
        FF
        """)

    # attach a parse action converting matched UUID strings to uuid.UUID
    import uuid
    pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
    pyparsing_common.uuid.runTests("""
        12345678-1234-5678-1234-567812345678
        """)

diff --git a/setuptools/_vendor/six.py b/setuptools/_vendor/six.py
new file mode 100644
index 0000000..190c023
--- /dev/null
+++ b/setuptools/_vendor/six.py
@@ -0,0 +1,868 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2015 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.10.0"
+
+
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)

if PY3:
    # Python 3: unified text/bytes split and a single int type.  The trailing
    # commas make string_types/integer_types/class_types one-element tuples,
    # suitable for isinstance() checks.
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    # Python 2 equivalents (basestring/long/unicode exist only on Py2).
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        class X(object):

            def __len__(self):
                return 1 << 31
        try:
            # len() raises OverflowError when __len__ exceeds Py_ssize_t,
            # which distinguishes a 32-bit build from a 64-bit one.
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
+
+
def _add_doc(func, doc):
    """Attach *doc* to *func* as its docstring."""
    setattr(func, "__doc__", doc)
+
+
def _import_module(name):
    """Import module, returning the module after the last dot."""
    # __import__ returns the top-level package for dotted names, so fetch
    # the fully-qualified module out of sys.modules instead.
    __import__(name)
    return sys.modules[name]
+
+
class _LazyDescr(object):
    """Descriptor base that resolves its value on first attribute access.

    Subclasses are expected to provide C{_resolve()} (this class calls it
    but does not define it).  C{name} is the attribute name the resolved
    value is stored under.
    """

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        # Resolve lazily, then cache the result on the instance so future
        # lookups bypass this descriptor entirely.
        result = self._resolve()
        setattr(obj, self.name, result)  # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
+
+
class MovedModule(_LazyDescr):
    """Lazy proxy for a module that lives under different names on Py2/Py3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if not PY3:
            self.mod = old
        else:
            # On Py3, default the target module name to *name* itself.
            self.mod = name if new is None else new

    def _resolve(self):
        # Import the target module on first use.
        return _import_module(self.mod)

    def __getattr__(self, attr):
        # Forward unknown attribute lookups to the real module, caching the
        # value on this proxy so later lookups skip __getattr__.
        resolved = self._resolve()
        value = getattr(resolved, attr)
        setattr(self, attr, value)
        return value
+
+
class _LazyModule(types.ModuleType):
    # Intentionally no class docstring: __init__ publishes
    # self.__class__.__doc__ as the instance docstring, and subclasses
    # supply the docstring that becomes the module's __doc__.

    # Subclasses override this with their moved-attribute entries.
    _moved_attributes = []

    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__

    def __dir__(self):
        # Advertise the lazily-provided names alongside the standard ones.
        base = ["__doc__", "__name__"]
        return base + [attr.name for attr in self._moved_attributes]
+
+
class MovedAttribute(_LazyDescr):
    """Lazy descriptor for an attribute that moved between modules in Py3."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            # Module defaults to *name*; attribute falls back to the old
            # name, then to *name* itself.
            self.mod = name if new_mod is None else new_mod
            if new_attr is None:
                new_attr = name if old_attr is None else old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            self.attr = name if old_attr is None else old_attr

    def _resolve(self):
        # Import the owning module, then pull the attribute off it.
        module = _import_module(self.mod)
        return getattr(module, self.attr)
+
+
class _SixMetaPathImporter(object):

    """
    A meta path importer to import six.moves and its submodules.

    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """

    def __init__(self, six_module_name):
        self.name = six_module_name
        # Maps fully-qualified dotted names to module (or MovedModule) objects.
        self.known_modules = {}

    def _add_module(self, mod, *fullnames):
        # Register *mod* under one or more names relative to self.name.
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod

    def _get_module(self, fullname):
        # Look up a registered module by its name relative to self.name.
        return self.known_modules[self.name + "." + fullname]

    def find_module(self, fullname, path=None):
        # PEP302 finder: claim only the modules we registered ourselves.
        if fullname in self.known_modules:
            return self
        return None

    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)

    def load_module(self, fullname):
        # PEP302 loader: return the cached module, resolving lazy
        # MovedModule placeholders into their real modules.
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod

    def is_package(self, fullname):
        """
        Return true, if the named module is a package.

        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")

    def get_code(self, fullname):
        """Return None

        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code  # same as get_code
+
# Module-level importer instance through which six.moves modules are served.
_importer = _SixMetaPathImporter(__name__)


class _MovedItems(_LazyModule):

    """Lazy loading of moved objects"""
    __path__ = []  # mark as package
+
+
# Table of renamed/relocated names: each entry records the six.moves name,
# its Python 2 location, and its Python 3 location (see the MovedAttribute /
# MovedModule constructors for the exact argument order).
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("intern", "__builtin__", "sys"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserDict", "UserDict", "collections"),
    MovedAttribute("UserList", "UserList", "collections"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
    _moved_attributes += [
        MovedModule("winreg", "_winreg"),
    ]

# Attach each entry to _MovedItems; full modules are also registered with the
# meta-path importer under the "moves." namespace.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        _importer._add_module(attr, "moves." + attr.name)
del attr

_MovedItems._moved_attributes = _moved_attributes

# The six.moves pseudo-module itself.
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
+
+
class Module_six_moves_urllib_parse(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_parse"""
    # Attributes are attached below from _urllib_parse_moved_attributes.
+
# urllib.parse contents: Python 2 split these across `urlparse` and `urllib`.
_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
    MovedAttribute("splitquery", "urllib", "urllib.parse"),
    MovedAttribute("splittag", "urllib", "urllib.parse"),
    MovedAttribute("splituser", "urllib", "urllib.parse"),
    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr

Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes

# Registered under both the flat and the nested spelling.
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
                      "moves.urllib_parse", "moves.urllib.parse")
+
+
class Module_six_moves_urllib_error(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_error"""
    # Attributes are attached below from _urllib_error_moved_attributes.
+
# urllib.error contents: Python 2 kept these in `urllib2`/`urllib`.
_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr

Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes

# Registered under both the flat and the nested spelling.
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
                      "moves.urllib_error", "moves.urllib.error")
+
+
class Module_six_moves_urllib_request(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_request"""
    # Attributes are attached below from _urllib_request_moved_attributes.
+
# urllib.request contents: Python 2 kept these in `urllib2`/`urllib`.
_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr

Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes

# Registered under both the flat and the nested spelling.
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
                      "moves.urllib_request", "moves.urllib.request")
+
+
class Module_six_moves_urllib_response(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_response"""
    # Attributes are attached below from _urllib_response_moved_attributes.
+
# urllib.response contents: Python 2 kept these in `urllib`.
_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr

Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes

# Registered under both the flat and the nested spelling.
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
                      "moves.urllib_response", "moves.urllib.response")
+
+
class Module_six_moves_urllib_robotparser(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
    # Attributes are attached below from _urllib_robotparser_moved_attributes.
+
# urllib.robotparser contents: Python 2's top-level `robotparser` module.
_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr

Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes

# Registered under both the flat and the nested spelling.
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
                      "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
class Module_six_moves_urllib(types.ModuleType):

    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = []  # mark as package
    # Submodules are the lazy modules registered with _importer above.
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")

    def __dir__(self):
        # Advertise only the five urllib submodules.
        return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+                      "moves.urllib")
+
+
def add_move(move):
    """Add an item to six.moves."""
    # `move` is a MovedAttribute or MovedModule; it becomes an attribute of
    # the six.moves pseudo-module under move.name.
    setattr(_MovedItems, move.name, move)
+
+
def remove_move(name):
    """Remove item from six.moves.

    Looks first on the _MovedItems class, then in the moves module's own
    namespace; raises AttributeError when the name is found in neither.
    """
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        if name not in moves.__dict__:
            raise AttributeError("no such move, %r" % (name,))
        del moves.__dict__[name]
+
+
# Attribute names for function/method introspection differ between the
# Python 2 and Python 3 object models; select the right set once so the
# attrgetter-based accessors below work on either version.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"
+
+
try:
    # The builtin next() exists on Python 2.6+ and all of Python 3.
    advance_iterator = next
except NameError:
    # Very old Python 2: fall back to the .next() method protocol.
    def advance_iterator(it):
        return it.next()
next = advance_iterator
+
+
try:
    callable = callable
except NameError:
    # Python 3.0/3.1 removed the callable() builtin; emulate it by looking
    # for __call__ anywhere in the type's MRO.
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods; plain functions pass through.
        return unbound

    create_bound_method = types.MethodType

    def create_unbound_method(func, cls):
        # On Python 3 a plain function serves as an "unbound method".
        return func

    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)

    def create_unbound_method(func, cls):
        return types.MethodType(func, None, cls)

    class Iterator(object):
        # Base class that maps Python 2's next() onto a __next__ method.

        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
+
+
# Accessors for function/method internals, built on the version-appropriate
# attribute names chosen above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
+
+
if PY3:
    # Python 3 dict views are already lazy; wrap in iter() for a uniform
    # iterator-returning API.
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))

    def itervalues(d, **kw):
        return iter(d.values(**kw))

    def iteritems(d, **kw):
        return iter(d.items(**kw))

    def iterlists(d, **kw):
        # NOTE(review): .lists() is not a plain-dict method — presumably for
        # multidict-style mappings that expose it; confirm against callers.
        return iter(d.lists(**kw))

    viewkeys = operator.methodcaller("keys")

    viewvalues = operator.methodcaller("values")

    viewitems = operator.methodcaller("items")
else:
    def iterkeys(d, **kw):
        return d.iterkeys(**kw)

    def itervalues(d, **kw):
        return d.itervalues(**kw)

    def iteritems(d, **kw):
        return d.iteritems(**kw)

    def iterlists(d, **kw):
        return d.iterlists(**kw)

    viewkeys = operator.methodcaller("viewkeys")

    viewvalues = operator.methodcaller("viewvalues")

    viewitems = operator.methodcaller("viewitems")

_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
         "Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
         "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
if PY3:
    def b(s):
        return s.encode("latin-1")

    def u(s):
        return s
    unichr = chr
    import struct
    int2byte = struct.Struct(">B").pack
    del struct
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
    # unittest assertion methods were renamed during the 3.x series; record
    # the spelling valid for the running interpreter.
    _assertCountEqual = "assertCountEqual"
    if sys.version_info[1] <= 1:
        _assertRaisesRegex = "assertRaisesRegexp"
        _assertRegex = "assertRegexpMatches"
    else:
        _assertRaisesRegex = "assertRaisesRegex"
        _assertRegex = "assertRegex"
else:
    def b(s):
        return s
    # Workaround for standalone backslash

    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr

    def byte2int(bs):
        return ord(bs[0])

    def indexbytes(buf, i):
        return ord(buf[i])
    iterbytes = functools.partial(itertools.imap, ord)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
    _assertCountEqual = "assertItemsEqual"
    _assertRaisesRegex = "assertRaisesRegexp"
    _assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
+
+
def assertCountEqual(self, *args, **kwargs):
    # Dispatch to the unittest method under its version-correct name.
    return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
def assertRaisesRegex(self, *args, **kwargs):
    # Dispatch to the unittest method under its version-correct name.
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
def assertRegex(self, *args, **kwargs):
    # Dispatch to the unittest method under its version-correct name.
    return getattr(self, _assertRegex)(*args, **kwargs)
+
+
if PY3:
    # exec is a real builtin function on Python 3; grab it via getattr since
    # "exec" is a keyword under Python 2 and cannot appear as a name here.
    exec_ = getattr(moves.builtins, "exec")

    def reraise(tp, value, tb=None):
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals and locals.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    # Three-argument raise is a syntax error on Python 3, so the Python 2
    # form is hidden inside an exec_ string.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
+
+
# "raise X from Y" is a syntax error before Python 3, so the 3.x forms are
# compiled through exec_ strings.
if sys.version_info[:2] == (3, 2):
    exec_("""def raise_from(value, from_value):
    if from_value is None:
        raise value
    raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
    exec_("""def raise_from(value, from_value):
    raise value from from_value
""")
else:
    def raise_from(value, from_value):
        # Python 2 has no exception chaining; the cause is simply dropped.
        raise value
+
+
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return

        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                    isinstance(data, unicode) and
                    fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        # If any argument is unicode, the separators must be unicode as well.
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
if sys.version_info[:2] < (3, 3):
    # print() only gained the ``flush`` keyword in 3.3; emulate it by
    # wrapping whichever print_ was selected above.
    _print = print_

    def print_(*args, **kwargs):
        fp = kwargs.get("file", sys.stdout)
        flush = kwargs.pop("flush", False)
        _print(*args, **kwargs)
        if flush and fp is not None:
            fp.flush()

_add_doc(reraise, """Reraise an exception.""")
+
if sys.version_info[0:2] < (3, 4):
    # NOTE(review): pre-3.4 functools.wraps did not reliably set
    # __wrapped__ to the original callable; this wrapper adds it — confirm.
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            f.__wrapped__ = wrapped
            return f
        return wrapper
else:
    wraps = functools.wraps
+
+
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):

        def __new__(cls, name, this_bases, d):
            # Ignore this_bases (which contains temporary_class) and build the
            # real class directly from the bases given to with_metaclass().
            return meta(name, bases, d)
    return type.__new__(metaclass, 'temporary_class', (), {})
+
+
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass.

    The decorated class is rebuilt from its own name, bases and namespace
    via ``metaclass(...)``, so the result behaves as if it had been declared
    with that metaclass in the first place.
    """
    def wrapper(cls):
        body = dict(cls.__dict__)
        # Slot descriptors live in the class namespace; drop them so the
        # rebuilt class recreates them from __slots__ itself.
        declared_slots = body.get('__slots__')
        if declared_slots is not None:
            slot_names = (
                [declared_slots]
                if isinstance(declared_slots, str)
                else declared_slots
            )
            for slot_name in slot_names:
                body.pop(slot_name)
        # These are auto-created per class and must not be passed through.
        for implicit in ('__dict__', '__weakref__'):
            body.pop(implicit, None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
+
+
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if not PY2:
        # Nothing to do on Python 3: __str__ already returns text.
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # Python 2: the author's text-returning __str__ becomes __unicode__,
    # and __str__ is replaced with a UTF-8-encoding shim.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
+
+
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = []  # required for PEP 302 and PEP 451
__package__ = __name__  # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
diff --git a/setuptools/_vendor/vendored.txt b/setuptools/_vendor/vendored.txt
new file mode 100644
index 0000000..be3e72e
--- /dev/null
+++ b/setuptools/_vendor/vendored.txt
@@ -0,0 +1,3 @@
+packaging==16.8
+pyparsing==2.1.10
+six==1.10.0
diff --git a/setuptools/archive_util.py b/setuptools/archive_util.py
new file mode 100755
index 0000000..8143604
--- /dev/null
+++ b/setuptools/archive_util.py
@@ -0,0 +1,173 @@
+"""Utilities for extracting common archive formats"""
+
+import zipfile
+import tarfile
+import os
+import shutil
+import posixpath
+import contextlib
+from distutils.errors import DistutilsError
+
+from pkg_resources import ensure_directory
+
+__all__ = [
+    "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
+    "UnrecognizedFormat", "extraction_drivers", "unpack_directory",
+]
+
+
class UnrecognizedFormat(DistutilsError):
    """Couldn't recognize the archive type"""
    # Raised by the unpack_* drivers below to signal "not my format";
    # unpack_archive() catches it and tries the next driver.
+
+
def default_filter(src, dst):
    """The default progress/filter callback; accepts every entry.

    `src` is the '/'-separated path inside the archive and `dst` the
    proposed extraction path.  Returning `dst` unchanged extracts every
    entry to its default location.  (The previous docstring claimed this
    returned ``True``; the contract is to return the destination path.)
    """
    return dst
+
+
def unpack_archive(filename, extract_dir, progress_filter=default_filter,
        drivers=None):
    """Unpack `filename` into `extract_dir`, or raise ``UnrecognizedFormat``.

    Each driver in `drivers` (default: the module-level
    ``extraction_drivers``) is tried in order; a driver signals "not my
    format" by raising ``UnrecognizedFormat``, in which case the next one is
    tried.  The first driver that accepts the archive performs the
    extraction and this function returns.  If every driver declines,
    ``UnrecognizedFormat`` is raised.

    `progress_filter` is called with (archive-internal '/'-separated path,
    proposed filesystem destination) for each entry and must return the
    destination to use — possibly modified — or a falsy value to skip that
    entry.  It can therefore report progress as well as filter or redirect
    the extracted items.
    """
    for candidate in (drivers or extraction_drivers):
        try:
            candidate(filename, extract_dir, progress_filter)
        except UnrecognizedFormat:
            continue
        # The driver accepted and extracted the archive; we're done.
        return
    raise UnrecognizedFormat(
        "Not a recognized archive type: %s" % filename
    )
+
+
def unpack_directory(filename, extract_dir, progress_filter=default_filter):
    """Copy the tree at `filename` into `extract_dir`, archive-style.

    Offers the same interface as the archive drivers so a plain directory
    can be "unpacked" by ``unpack_archive``.  Raises ``UnrecognizedFormat``
    when `filename` is not a directory.
    """
    if not os.path.isdir(filename):
        raise UnrecognizedFormat("%s is not a directory" % filename)

    # Maps each visited directory to (archive-style '/' prefix, destination).
    mapping = {filename: ('', extract_dir)}
    for root, subdirs, filenames in os.walk(filename):
        prefix, out_dir = mapping[root]
        for sub in subdirs:
            mapping[os.path.join(root, sub)] = (
                prefix + sub + '/', os.path.join(out_dir, sub))
        for name in filenames:
            dest = progress_filter(prefix + name, os.path.join(out_dir, name))
            if not dest:
                # The filter declined this entry.
                continue
            ensure_directory(dest)
            source = os.path.join(root, name)
            shutil.copyfile(source, dest)
            shutil.copystat(source, dest)
+
+
def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
    """Extract the zip archive `filename` into `extract_dir`.

    Raises ``UnrecognizedFormat`` when ``zipfile.is_zipfile()`` rejects the
    file.  See ``unpack_archive()`` for the `progress_filter` contract.
    """
    if not zipfile.is_zipfile(filename):
        raise UnrecognizedFormat("%s is not a zip file" % (filename,))

    with zipfile.ZipFile(filename) as archive:
        for entry in archive.infolist():
            name = entry.filename
            parts = name.split('/')

            # Refuse absolute paths and parent-directory escapes.
            if name.startswith('/') or '..' in parts:
                continue

            dest = progress_filter(name, os.path.join(extract_dir, *parts))
            if not dest:
                continue
            ensure_directory(dest)
            if not name.endswith('/'):
                # Regular file: read the payload first, then write it out.
                contents = archive.read(name)
                with open(dest, 'wb') as out:
                    out.write(contents)
            # The upper 16 bits of external_attr carry the POSIX mode bits.
            mode = entry.external_attr >> 16
            if mode:
                os.chmod(dest, mode)
+
+
def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
    """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`

    Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
    by ``tarfile.open()``).  See ``unpack_archive()`` for an explanation
    of the `progress_filter` argument.
    """
    try:
        tarobj = tarfile.open(filename)
    except tarfile.TarError:
        raise UnrecognizedFormat(
            "%s is not a compressed or uncompressed tar file" % (filename,)
        )
    with contextlib.closing(tarobj):
        # don't do any chowning!
        tarobj.chown = lambda *args: None
        for member in tarobj:
            name = member.name
            # don't extract absolute paths or ones with .. in them
            if not name.startswith('/') and '..' not in name.split('/'):
                prelim_dst = os.path.join(extract_dir, *name.split('/'))

                # resolve any links and to extract the link targets as normal
                # files
                while member is not None and (member.islnk() or member.issym()):
                    linkpath = member.linkname
                    if member.issym():
                        # Symlink targets are relative to the link's own
                        # directory; hard-link names are archive-rooted.
                        base = posixpath.dirname(member.name)
                        linkpath = posixpath.join(base, linkpath)
                        linkpath = posixpath.normpath(linkpath)
                    # _getmember may return None when the target is absent.
                    member = tarobj._getmember(linkpath)

                if member is not None and (member.isfile() or member.isdir()):
                    final_dst = progress_filter(name, prelim_dst)
                    if final_dst:
                        if final_dst.endswith(os.sep):
                            final_dst = final_dst[:-1]
                        try:
                            # XXX Ugh — private tarfile API; presumably used
                            # so final_dst is honored verbatim — confirm.
                            tarobj._extract_member(member, final_dst)
                        except tarfile.ExtractError:
                            # chown/chmod/mkfifo/mknode/makedev failed
                            pass
        return True


# Tried in this order by unpack_archive().
extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
diff --git a/setuptools/build_meta.py b/setuptools/build_meta.py
new file mode 100644
index 0000000..609ea1e
--- /dev/null
+++ b/setuptools/build_meta.py
@@ -0,0 +1,172 @@
+"""A PEP 517 interface to setuptools
+
+Previously, when a user or a command line tool (let's call it a "frontend")
+needed to make a request of setuptools to take a certain action, for
+example, generating a list of installation requirements, the frontend
+would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
+
+PEP 517 defines a different method of interfacing with setuptools. Rather
+than calling "setup.py" directly, the frontend should:
+
+  1. Set the current directory to the directory with a setup.py file
+  2. Import this module into a safe python interpreter (one in which
+     setuptools can potentially set global variables or crash hard).
+  3. Call one of the functions defined in PEP 517.
+
+What each function does is defined in PEP 517. However, here is a "casual"
+definition of the functions (this definition should not be relied on for
+bug reports or API stability):
+
+  - `build_wheel`: build a wheel in the folder and return the basename
+  - `get_requires_for_build_wheel`: get the `setup_requires` to build
+  - `prepare_metadata_for_build_wheel`: get the `install_requires`
+  - `build_sdist`: build an sdist in the folder and return the basename
+  - `get_requires_for_build_sdist`: get the `setup_requires` to build
+
+Again, this is not a formal definition! Just a "taste" of the module.
+"""
+
+import os
+import sys
+import tokenize
+import shutil
+import contextlib
+
+import setuptools
+import distutils
+
+
+class SetupRequirementsError(BaseException):
+    # Deliberately derived from BaseException (not Exception) so that a
+    # setup script's own broad ``except Exception`` handlers cannot swallow
+    # it while it propagates out of ``fetch_build_eggs`` to the caller of
+    # ``_get_build_requires``.
+    def __init__(self, specifiers):
+        # The requirement specifier strings setuptools tried to fetch
+        # (i.e. the project's declared ``setup_requires``).
+        self.specifiers = specifiers
+
+
+class Distribution(setuptools.dist.Distribution):
+    """Distribution subclass that *reports* ``setup_requires`` dependencies
+    (via SetupRequirementsError) instead of downloading them."""
+
+    def fetch_build_eggs(self, specifiers):
+        # Surface the requested specifiers to the caller rather than
+        # fetching eggs; see _get_build_requires.
+        raise SetupRequirementsError(specifiers)
+
+    @classmethod
+    @contextlib.contextmanager
+    def patch(cls):
+        """
+        Replace
+        distutils.core.Distribution with this class
+        for the duration of this context.
+        """
+        orig = distutils.core.Distribution
+        distutils.core.Distribution = cls
+        try:
+            yield
+        finally:
+            # Always restore the original class, even if the body raised.
+            distutils.core.Distribution = orig
+
+
+def _run_setup(setup_script='setup.py'):
+    # Note that we can reuse our build directory between calls
+    # Correctness comes first, then optimization later
+    __file__ = setup_script
+    __name__ = '__main__'
+    f = getattr(tokenize, 'open', open)(__file__)
+    code = f.read().replace('\\r\\n', '\\n')
+    f.close()
+    exec(compile(code, __file__, 'exec'), locals())
+
+
+def _fix_config(config_settings):
+    config_settings = config_settings or {}
+    config_settings.setdefault('--global-option', [])
+    return config_settings
+
+
+def _get_build_requires(config_settings):
+    """Compute build requirements by dry-running ``setup.py egg_info``.
+
+    The patched Distribution raises SetupRequirementsError from
+    ``fetch_build_eggs`` instead of downloading eggs, letting us harvest
+    the declared ``setup_requires`` specifiers without side effects.
+    """
+    config_settings = _fix_config(config_settings)
+    # This backend always needs setuptools and wheel to build.
+    requirements = ['setuptools', 'wheel']
+
+    # NOTE(review): rewrites sys.argv for the in-process setup.py run;
+    # the original argv is not restored afterwards.
+    sys.argv = sys.argv[:1] + ['egg_info'] + \
+        config_settings["--global-option"]
+    try:
+        with Distribution.patch():
+            _run_setup()
+    except SetupRequirementsError as e:
+        requirements += e.specifiers
+
+    return requirements
+
+
+def _get_immediate_subdirectories(a_dir):
+    return [name for name in os.listdir(a_dir)
+            if os.path.isdir(os.path.join(a_dir, name))]
+
+
+def get_requires_for_build_wheel(config_settings=None):
+    config_settings = _fix_config(config_settings)
+    return _get_build_requires(config_settings)
+
+
+def get_requires_for_build_sdist(config_settings=None):
+    config_settings = _fix_config(config_settings)
+    return _get_build_requires(config_settings)
+
+
+def prepare_metadata_for_build_wheel(metadata_directory, config_settings=None):
+    """PEP 517 hook: generate ``*.dist-info`` metadata in *metadata_directory*.
+
+    Runs the ``dist_info`` command via the setup script, locates the
+    generated ``*.dist-info`` directory (which ``--egg-base`` may place in a
+    nested directory), moves it to the root of *metadata_directory* as
+    PEP 517 requires, and returns its basename.
+    """
+    # NOTE(review): mutates sys.argv for the in-process setup.py run.
+    sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', metadata_directory]
+    _run_setup()
+    
+    dist_info_directory = metadata_directory
+    while True:    
+        dist_infos = [f for f in os.listdir(dist_info_directory)
+                      if f.endswith('.dist-info')]
+
+        # Descend while no .dist-info is here and there is exactly one
+        # subdirectory (and nothing else) to descend into.
+        if len(dist_infos) == 0 and \
+                len(_get_immediate_subdirectories(dist_info_directory)) == 1:
+            dist_info_directory = os.path.join(
+                dist_info_directory, os.listdir(dist_info_directory)[0])
+            continue
+
+        assert len(dist_infos) == 1
+        break
+
+    # PEP 517 requires that the .dist-info directory be placed in the
+    # metadata_directory. To comply, we MUST copy the directory to the root
+    if dist_info_directory != metadata_directory:
+        shutil.move(
+            os.path.join(dist_info_directory, dist_infos[0]),
+            metadata_directory)
+        shutil.rmtree(dist_info_directory, ignore_errors=True)
+
+    return dist_infos[0]
+
+
+def build_wheel(wheel_directory, config_settings=None,
+                metadata_directory=None):
+    config_settings = _fix_config(config_settings)
+    wheel_directory = os.path.abspath(wheel_directory)
+    sys.argv = sys.argv[:1] + ['bdist_wheel'] + \
+        config_settings["--global-option"]
+    _run_setup()
+    if wheel_directory != 'dist':
+        shutil.rmtree(wheel_directory)
+        shutil.copytree('dist', wheel_directory)
+
+    wheels = [f for f in os.listdir(wheel_directory)
+              if f.endswith('.whl')]
+
+    assert len(wheels) == 1
+    return wheels[0]
+
+
+def build_sdist(sdist_directory, config_settings=None):
+    config_settings = _fix_config(config_settings)
+    sdist_directory = os.path.abspath(sdist_directory)
+    sys.argv = sys.argv[:1] + ['sdist'] + \
+        config_settings["--global-option"]
+    _run_setup()
+    if sdist_directory != 'dist':
+        shutil.rmtree(sdist_directory)
+        shutil.copytree('dist', sdist_directory)
+
+    sdists = [f for f in os.listdir(sdist_directory)
+              if f.endswith('.tar.gz')]
+
+    assert len(sdists) == 1
+    return sdists[0]
diff --git a/setuptools/cli-32.exe b/setuptools/cli-32.exe
new file mode 100644
index 0000000..b1487b7
--- /dev/null
+++ b/setuptools/cli-32.exe
Binary files differ
diff --git a/setuptools/cli-64.exe b/setuptools/cli-64.exe
new file mode 100644
index 0000000..675e6bf
--- /dev/null
+++ b/setuptools/cli-64.exe
Binary files differ
diff --git a/setuptools/cli.exe b/setuptools/cli.exe
new file mode 100644
index 0000000..b1487b7
--- /dev/null
+++ b/setuptools/cli.exe
Binary files differ
diff --git a/setuptools/command/__init__.py b/setuptools/command/__init__.py
new file mode 100644
index 0000000..fe619e2
--- /dev/null
+++ b/setuptools/command/__init__.py
@@ -0,0 +1,18 @@
+__all__ = [
+    'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop',
+    'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts',
+    'sdist', 'setopt', 'test', 'install_egg_info', 'install_scripts',
+    'register', 'bdist_wininst', 'upload_docs', 'upload', 'build_clib',
+    'dist_info',
+]
+
+from distutils.command.bdist import bdist
+import sys
+
+from setuptools.command import install_scripts
+
+# Register the 'egg' format with distutils' bdist command so that
+# "setup.py bdist --formats=egg" dispatches to bdist_egg.
+if 'egg' not in bdist.format_commands:
+    bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
+    bdist.format_commands.append('egg')
+
+# Don't leak helper names into this package's namespace.
+del bdist, sys
diff --git a/setuptools/command/alias.py b/setuptools/command/alias.py
new file mode 100755
index 0000000..4532b1c
--- /dev/null
+++ b/setuptools/command/alias.py
@@ -0,0 +1,80 @@
+from distutils.errors import DistutilsOptionError
+
+from setuptools.extern.six.moves import map
+
+from setuptools.command.setopt import edit_config, option_base, config_file
+
+
+def shquote(arg):
+    """Quote an argument for later parsing by shlex.split()"""
+    for c in '"', "'", "\\", "#":
+        if c in arg:
+            return repr(arg)
+    if arg.split() != [arg]:
+        return repr(arg)
+    return arg
+
+
+class alias(option_base):
+    """Define a shortcut that invokes one or more commands"""
+
+    description = "define a shortcut to invoke one or more commands"
+    # distutils collects any trailing positional args into self.args.
+    command_consumes_arguments = True
+
+    user_options = [
+        ('remove', 'r', 'remove (unset) the alias'),
+    ] + option_base.user_options
+
+    boolean_options = option_base.boolean_options + ['remove']
+
+    def initialize_options(self):
+        option_base.initialize_options(self)
+        self.args = None
+        self.remove = None
+
+    def finalize_options(self):
+        option_base.finalize_options(self)
+        # --remove takes exactly one argument: the alias name to unset.
+        if self.remove and len(self.args) != 1:
+            raise DistutilsOptionError(
+                "Must specify exactly one argument (the alias name) when "
+                "using --remove"
+            )
+
+    def run(self):
+        """List, show, remove, or define aliases depending on the args."""
+        aliases = self.distribution.get_option_dict('aliases')
+
+        # No arguments: list every known alias and return.
+        if not self.args:
+            print("Command Aliases")
+            print("---------------")
+            for alias in aliases:
+                print("setup.py alias", format_alias(alias, aliases))
+            return
+
+        elif len(self.args) == 1:
+            # One argument: either remove the alias (command=None makes
+            # edit_config delete the entry) or just display its definition.
+            alias, = self.args
+            if self.remove:
+                command = None
+            elif alias in aliases:
+                print("setup.py alias", format_alias(alias, aliases))
+                return
+            else:
+                print("No alias definition found for %r" % alias)
+                return
+        else:
+            # Two or more arguments: define alias = shell-quoted command.
+            alias = self.args[0]
+            command = ' '.join(map(shquote, self.args[1:]))
+
+        edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
+
+
+def format_alias(name, aliases):
+    source, command = aliases[name]
+    if source == config_file('global'):
+        source = '--global-config '
+    elif source == config_file('user'):
+        source = '--user-config '
+    elif source == config_file('local'):
+        source = ''
+    else:
+        source = '--filename=%r' % source
+    return source + name + ' ' + command
diff --git a/setuptools/command/bdist_egg.py b/setuptools/command/bdist_egg.py
new file mode 100644
index 0000000..423b818
--- /dev/null
+++ b/setuptools/command/bdist_egg.py
@@ -0,0 +1,502 @@
+"""setuptools.command.bdist_egg
+
+Build .egg distributions"""
+
+from distutils.errors import DistutilsSetupError
+from distutils.dir_util import remove_tree, mkpath
+from distutils import log
+from types import CodeType
+import sys
+import os
+import re
+import textwrap
+import marshal
+
+from setuptools.extern import six
+
+from pkg_resources import get_build_platform, Distribution, ensure_directory
+from pkg_resources import EntryPoint
+from setuptools.extension import Library
+from setuptools import Command
+
+try:
+    # Python 2.7 or >=3.2
+    from sysconfig import get_path, get_python_version
+
+    def _get_purelib():
+        # Directory where pure-Python packages get installed.
+        return get_path("purelib")
+except ImportError:
+    # Older Pythons lack sysconfig; fall back to distutils' equivalent.
+    from distutils.sysconfig import get_python_lib, get_python_version
+
+    def _get_purelib():
+        # False => the non-platform-specific (pure-Python) library dir.
+        return get_python_lib(False)
+
+
+def strip_module(filename):
+    if '.' in filename:
+        filename = os.path.splitext(filename)[0]
+    if filename.endswith('module'):
+        filename = filename[:-6]
+    return filename
+
+
+def sorted_walk(dir):
+    """Do os.walk in a reproducible way,
+    independent of indeterministic filesystem readdir order
+    """
+    for base, dirs, files in os.walk(dir):
+        dirs.sort()
+        files.sort()
+        yield base, dirs, files
+
+
+def write_stub(resource, pyfile):
+    _stub_template = textwrap.dedent("""
+        def __bootstrap__():
+            global __bootstrap__, __loader__, __file__
+            import sys, pkg_resources, imp
+            __file__ = pkg_resources.resource_filename(__name__, %r)
+            __loader__ = None; del __bootstrap__, __loader__
+            imp.load_dynamic(__name__,__file__)
+        __bootstrap__()
+        """).lstrip()
+    with open(pyfile, 'w') as f:
+        f.write(_stub_template % resource)
+
+
+class bdist_egg(Command):
+    # Build a .egg distribution: install the project into a temporary
+    # tree, add EGG-INFO metadata, then zip the result.
+    description = "create an \"egg\" distribution"
+
+    user_options = [
+        ('bdist-dir=', 'b',
+         "temporary directory for creating the distribution"),
+        ('plat-name=', 'p', "platform name to embed in generated filenames "
+                            "(default: %s)" % get_build_platform()),
+        ('exclude-source-files', None,
+         "remove all .py files from the generated egg"),
+        ('keep-temp', 'k',
+         "keep the pseudo-installation tree around after " +
+         "creating the distribution archive"),
+        ('dist-dir=', 'd',
+         "directory to put final built distributions in"),
+        ('skip-build', None,
+         "skip rebuilding everything (for testing/debugging)"),
+    ]
+
+    boolean_options = [
+        'keep-temp', 'skip-build', 'exclude-source-files'
+    ]
+
+    def initialize_options(self):
+        # distutils-style defaults; real values are computed in
+        # finalize_options.
+        self.bdist_dir = None
+        self.plat_name = None
+        self.keep_temp = 0
+        self.dist_dir = None
+        self.skip_build = 0
+        self.egg_output = None
+        self.exclude_source_files = None
+
+    def finalize_options(self):
+        """Resolve defaults and compute the output egg filename."""
+        ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
+        self.egg_info = ei_cmd.egg_info
+
+        if self.bdist_dir is None:
+            bdist_base = self.get_finalized_command('bdist').bdist_base
+            self.bdist_dir = os.path.join(bdist_base, 'egg')
+
+        if self.plat_name is None:
+            self.plat_name = get_build_platform()
+
+        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+
+        if self.egg_output is None:
+
+            # Compute filename of the output egg
+            basename = Distribution(
+                None, None, ei_cmd.egg_name, ei_cmd.egg_version,
+                get_python_version(),
+                self.distribution.has_ext_modules() and self.plat_name
+            ).egg_name()
+
+            self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
+
+    def do_install_data(self):
+        # Hack for packages that install data to install's --install-lib
+        self.get_finalized_command('install').install_lib = self.bdist_dir
+
+        site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
+        old, self.distribution.data_files = self.distribution.data_files, []
+
+        for item in old:
+            # Rewrite absolute site-packages destinations to be relative,
+            # so the data lands inside the egg's build tree instead.
+            if isinstance(item, tuple) and len(item) == 2:
+                if os.path.isabs(item[0]):
+                    realpath = os.path.realpath(item[0])
+                    normalized = os.path.normcase(realpath)
+                    if normalized == site_packages or normalized.startswith(
+                        site_packages + os.sep
+                    ):
+                        item = realpath[len(site_packages) + 1:], item[1]
+                        # XXX else: raise ???
+            self.distribution.data_files.append(item)
+
+        try:
+            log.info("installing package data to %s", self.bdist_dir)
+            self.call_command('install_data', force=0, root=None)
+        finally:
+            # Restore the distribution's original data_files.
+            self.distribution.data_files = old
+
+    def get_outputs(self):
+        # The only artifact this command produces is the egg itself.
+        return [self.egg_output]
+
+    def call_command(self, cmdname, **kw):
+        """Invoke reinitialized command `cmdname` with keyword args"""
+        # Point every install-directory option at the egg build dir.
+        for dirname in INSTALL_DIRECTORY_ATTRS:
+            kw.setdefault(dirname, self.bdist_dir)
+        kw.setdefault('skip_build', self.skip_build)
+        kw.setdefault('dry_run', self.dry_run)
+        cmd = self.reinitialize_command(cmdname, **kw)
+        self.run_command(cmdname)
+        return cmd
+
+    def run(self):
+        """Build the egg: install into bdist_dir, write EGG-INFO, zip."""
+        # Generate metadata first
+        self.run_command("egg_info")
+        # We run install_lib before install_data, because some data hacks
+        # pull their data path from the install_lib command.
+        log.info("installing library code to %s", self.bdist_dir)
+        instcmd = self.get_finalized_command('install')
+        old_root = instcmd.root
+        instcmd.root = None
+        if self.distribution.has_c_libraries() and not self.skip_build:
+            self.run_command('build_clib')
+        cmd = self.call_command('install_lib', warn_dir=0)
+        instcmd.root = old_root
+
+        all_outputs, ext_outputs = self.get_ext_outputs()
+        self.stubs = []
+        to_compile = []
+        # p is the index of ext_name within ext_outputs (enumerate).
+        for (p, ext_name) in enumerate(ext_outputs):
+            filename, ext = os.path.splitext(ext_name)
+            pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
+                                  '.py')
+            self.stubs.append(pyfile)
+            log.info("creating stub loader for %s", ext_name)
+            if not self.dry_run:
+                write_stub(os.path.basename(ext_name), pyfile)
+            to_compile.append(pyfile)
+            ext_outputs[p] = ext_name.replace(os.sep, '/')
+
+        if to_compile:
+            cmd.byte_compile(to_compile)
+        if self.distribution.data_files:
+            self.do_install_data()
+
+        # Make the EGG-INFO directory
+        archive_root = self.bdist_dir
+        egg_info = os.path.join(archive_root, 'EGG-INFO')
+        self.mkpath(egg_info)
+        if self.distribution.scripts:
+            script_dir = os.path.join(egg_info, 'scripts')
+            log.info("installing scripts to %s", script_dir)
+            self.call_command('install_scripts', install_dir=script_dir,
+                              no_ep=1)
+
+        self.copy_metadata_to(egg_info)
+        native_libs = os.path.join(egg_info, "native_libs.txt")
+        if all_outputs:
+            log.info("writing %s", native_libs)
+            if not self.dry_run:
+                ensure_directory(native_libs)
+                libs_file = open(native_libs, 'wt')
+                libs_file.write('\n'.join(all_outputs))
+                libs_file.write('\n')
+                libs_file.close()
+        elif os.path.isfile(native_libs):
+            # No native libs this time; drop a stale native_libs.txt.
+            log.info("removing %s", native_libs)
+            if not self.dry_run:
+                os.unlink(native_libs)
+
+        write_safety_flag(
+            os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
+        )
+
+        if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
+            log.warn(
+                "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
+                "Use the install_requires/extras_require setup() args instead."
+            )
+
+        if self.exclude_source_files:
+            self.zap_pyfiles()
+
+        # Make the archive
+        make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
+                     dry_run=self.dry_run, mode=self.gen_header())
+        if not self.keep_temp:
+            remove_tree(self.bdist_dir, dry_run=self.dry_run)
+
+        # Add to 'Distribution.dist_files' so that the "upload" command works
+        getattr(self.distribution, 'dist_files', []).append(
+            ('bdist_egg', get_python_version(), self.egg_output))
+
+    def zap_pyfiles(self):
+        """Delete .py sources and hoist __pycache__ .pyc files beside them."""
+        log.info("Removing .py files from temporary directory")
+        for base, dirs, files in walk_egg(self.bdist_dir):
+            for name in files:
+                path = os.path.join(base, name)
+
+                if name.endswith('.py'):
+                    log.debug("Deleting %s", path)
+                    os.unlink(path)
+
+                if base.endswith('__pycache__'):
+                    path_old = path
+
+                    # Strip the interpreter tag: name.cpython-XY.pyc -> name.pyc
+                    pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc'
+                    m = re.match(pattern, name)
+                    path_new = os.path.join(
+                        base, os.pardir, m.group('name') + '.pyc')
+                    log.info(
+                        "Renaming file from [%s] to [%s]"
+                        % (path_old, path_new))
+                    try:
+                        os.remove(path_new)
+                    except OSError:
+                        pass
+                    os.rename(path_old, path_new)
+
+    def zip_safe(self):
+        """Return the zip-safety verdict: explicit flag or scan result."""
+        safe = getattr(self.distribution, 'zip_safe', None)
+        if safe is not None:
+            return safe
+        log.warn("zip_safe flag not set; analyzing archive contents...")
+        return analyze_egg(self.bdist_dir, self.stubs)
+
+    def gen_header(self):
+        """Return the zipfile open mode: 'w', or 'a' after writing a shell
+        header for an eggsecutable entry point."""
+        epm = EntryPoint.parse_map(self.distribution.entry_points or '')
+        ep = epm.get('setuptools.installation', {}).get('eggsecutable')
+        if ep is None:
+            return 'w'  # not an eggsecutable, do it the usual way.
+
+        if not ep.attrs or ep.extras:
+            raise DistutilsSetupError(
+                "eggsecutable entry point (%r) cannot have 'extras' "
+                "or refer to a module" % (ep,)
+            )
+
+        pyver = sys.version[:3]
+        pkg = ep.module_name
+        full = '.'.join(ep.attrs)
+        base = ep.attrs[0]
+        basename = os.path.basename(self.egg_output)
+
+        # Shell prologue that makes the egg directly executable; the zip
+        # archive is then appended to this header (mode 'a' below).
+        header = (
+            "#!/bin/sh\n"
+            'if [ `basename $0` = "%(basename)s" ]\n'
+            'then exec python%(pyver)s -c "'
+            "import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
+            "from %(pkg)s import %(base)s; sys.exit(%(full)s())"
+            '" "$@"\n'
+            'else\n'
+            '  echo $0 is not the correct name for this egg file.\n'
+            '  echo Please rename it back to %(basename)s and try again.\n'
+            '  exec false\n'
+            'fi\n'
+        ) % locals()
+
+        if not self.dry_run:
+            mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
+            f = open(self.egg_output, 'w')
+            f.write(header)
+            f.close()
+        return 'a'
+
+    def copy_metadata_to(self, target_dir):
+        "Copy metadata (egg info) to the target_dir"
+        # normalize the path (so that a forward-slash in egg_info will
+        # match using startswith below)
+        norm_egg_info = os.path.normpath(self.egg_info)
+        prefix = os.path.join(norm_egg_info, '')
+        for path in self.ei_cmd.filelist.files:
+            if path.startswith(prefix):
+                target = os.path.join(target_dir, path[len(prefix):])
+                ensure_directory(target)
+                self.copy_file(path, target)
+
+    def get_ext_outputs(self):
+        """Get a list of relative paths to C extensions in the output distro"""
+
+        all_outputs = []
+        ext_outputs = []
+
+        # Map each walked directory to its egg-relative prefix ('' = root).
+        paths = {self.bdist_dir: ''}
+        for base, dirs, files in sorted_walk(self.bdist_dir):
+            for filename in files:
+                if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
+                    all_outputs.append(paths[base] + filename)
+            for filename in dirs:
+                paths[os.path.join(base, filename)] = (paths[base] +
+                                                       filename + '/')
+
+        if self.distribution.has_ext_modules():
+            build_cmd = self.get_finalized_command('build_ext')
+            for ext in build_cmd.extensions:
+                # Libraries are shared internals, not importable extensions.
+                if isinstance(ext, Library):
+                    continue
+                fullname = build_cmd.get_ext_fullname(ext.name)
+                filename = build_cmd.get_ext_filename(fullname)
+                if not os.path.basename(filename).startswith('dl-'):
+                    if os.path.exists(os.path.join(self.bdist_dir, filename)):
+                        ext_outputs.append(filename)
+
+        return all_outputs, ext_outputs
+
+
+# File extensions treated as native (compiled) code when cataloguing outputs.
+NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
+
+
+def walk_egg(egg_dir):
+    """Walk an unpacked egg's contents, skipping the metadata directory"""
+    walker = sorted_walk(egg_dir)
+    base, dirs, files = next(walker)
+    if 'EGG-INFO' in dirs:
+        dirs.remove('EGG-INFO')
+    yield base, dirs, files
+    for bdf in walker:
+        yield bdf
+
+
+def analyze_egg(egg_dir, stubs):
+    # check for existing flag in EGG-INFO
+    for flag, fn in safety_flags.items():
+        if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
+            return flag
+    if not can_scan():
+        return False
+    safe = True
+    for base, dirs, files in walk_egg(egg_dir):
+        for name in files:
+            if name.endswith('.py') or name.endswith('.pyw'):
+                continue
+            elif name.endswith('.pyc') or name.endswith('.pyo'):
+                # always scan, even if we already know we're not safe
+                safe = scan_module(egg_dir, base, name, stubs) and safe
+    return safe
+
+
+def write_safety_flag(egg_dir, safe):
+    # Write or remove zip safety flag file(s)
+    for flag, fn in safety_flags.items():
+        fn = os.path.join(egg_dir, fn)
+        if os.path.exists(fn):
+            if safe is None or bool(safe) != flag:
+                os.unlink(fn)
+        elif safe is not None and bool(safe) == flag:
+            f = open(fn, 'wt')
+            f.write('\n')
+            f.close()
+
+
+# Maps the zip-safety verdict to the marker file written into EGG-INFO.
+safety_flags = {
+    True: 'zip-safe',
+    False: 'not-zip-safe',
+}
+
+
+def scan_module(egg_dir, base, name, stubs):
+    """Check whether module possibly uses unsafe-for-zipfile stuff"""
+
+    filename = os.path.join(base, name)
+    if filename[:-1] in stubs:
+        return True  # Extension module
+    pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
+    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
+    if sys.version_info < (3, 3):
+        skip = 8  # skip magic & date
+    elif sys.version_info < (3, 7):
+        skip = 12  # skip magic & date & file size
+    else:
+        skip = 16  # skip magic & reserved? & date & file size
+    f = open(filename, 'rb')
+    f.read(skip)
+    code = marshal.load(f)
+    f.close()
+    safe = True
+    symbols = dict.fromkeys(iter_symbols(code))
+    for bad in ['__file__', '__path__']:
+        if bad in symbols:
+            log.warn("%s: module references %s", module, bad)
+            safe = False
+    if 'inspect' in symbols:
+        for bad in [
+            'getsource', 'getabsfile', 'getsourcefile', 'getfile'
+            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
+            'getinnerframes', 'getouterframes', 'stack', 'trace'
+        ]:
+            if bad in symbols:
+                log.warn("%s: module MAY be using inspect.%s", module, bad)
+                safe = False
+    return safe
+
+
+def iter_symbols(code):
+    """Yield names and strings used by `code` and its nested code objects"""
+    for name in code.co_names:
+        yield name
+    for const in code.co_consts:
+        if isinstance(const, six.string_types):
+            yield const
+        elif isinstance(const, CodeType):
+            for name in iter_symbols(const):
+                yield name
+
+
+def can_scan():
+    if not sys.platform.startswith('java') and sys.platform != 'cli':
+        # CPython, PyPy, etc.
+        return True
+    log.warn("Unable to analyze compiled code on this platform.")
+    log.warn("Please ask the author to include a 'zip_safe'"
+             " setting (either True or False) in the package's setup.py")
+
+
+# Attribute names of options for commands that might need to be convinced to
+# install to the egg build directory
+# (see bdist_egg.call_command, which defaults each of these to bdist_dir).
+
+INSTALL_DIRECTORY_ATTRS = [
+    'install_lib', 'install_dir', 'install_data', 'install_base'
+]
+
+
+def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
+                 mode='w'):
+    """Create a zip file from all the files under 'base_dir'.  The output
+    zip file is named 'zip_filename'.  'mode' is passed to ZipFile ('a'
+    appends to an existing file, as used for eggsecutable headers).
+    Returns the name of the output zip file.
+    """
+    import zipfile
+
+    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
+    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
+
+    def visit(z, dirname, names):
+        # Add each regular file under dirname to archive z, stored with
+        # its path relative to base_dir.
+        for name in names:
+            path = os.path.normpath(os.path.join(dirname, name))
+            if os.path.isfile(path):
+                p = path[len(base_dir) + 1:]
+                if not dry_run:
+                    z.write(path, p)
+                log.debug("adding '%s'", p)
+
+    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
+    if not dry_run:
+        z = zipfile.ZipFile(zip_filename, mode, compression=compression)
+        for dirname, dirs, files in sorted_walk(base_dir):
+            visit(z, dirname, files)
+        z.close()
+    else:
+        # Dry run: walk and log what would be added, writing nothing.
+        for dirname, dirs, files in sorted_walk(base_dir):
+            visit(None, dirname, files)
+    return zip_filename
diff --git a/setuptools/command/bdist_rpm.py b/setuptools/command/bdist_rpm.py
new file mode 100755
index 0000000..7073092
--- /dev/null
+++ b/setuptools/command/bdist_rpm.py
@@ -0,0 +1,43 @@
+import distutils.command.bdist_rpm as orig
+
+
+class bdist_rpm(orig.bdist_rpm):
+    """
+    Override the default bdist_rpm behavior to do the following:
+
+    1. Run egg_info to ensure the name and version are properly calculated.
+    2. Always run 'install' using --single-version-externally-managed to
+       disable eggs in RPM distributions.
+    3. Replace dash with underscore in the version numbers for better RPM
+       compatibility.
+    """
+
+    def run(self):
+        # ensure distro name is up-to-date
+        self.run_command('egg_info')
+
+        orig.bdist_rpm.run(self)
+
+    def _make_spec_file(self):
+        """Post-process the distutils-generated spec file (see class doc)."""
+        version = self.distribution.get_version()
+        # RPM does not allow dashes in the Version tag.
+        rpmversion = version.replace('-', '_')
+        spec = orig.bdist_rpm._make_spec_file(self)
+        # The '%define version' line distutils emits, before/after mangling.
+        line23 = '%define version ' + version
+        line24 = '%define version ' + rpmversion
+        spec = [
+            line.replace(
+                "Source0: %{name}-%{version}.tar",
+                "Source0: %{name}-%{unmangled_version}.tar"
+            ).replace(
+                "setup.py install ",
+                "setup.py install --single-version-externally-managed "
+            ).replace(
+                "%setup",
+                "%setup -n %{name}-%{unmangled_version}"
+            ).replace(line23, line24)
+            for line in spec
+        ]
+        # Record the original (unmangled) version right after the mangled
+        # definition so the Source0/%setup references above resolve.
+        insert_loc = spec.index(line24) + 1
+        unmangled_version = "%define unmangled_version " + version
+        spec.insert(insert_loc, unmangled_version)
+        return spec
diff --git a/setuptools/command/bdist_wininst.py b/setuptools/command/bdist_wininst.py
new file mode 100755
index 0000000..073de97
--- /dev/null
+++ b/setuptools/command/bdist_wininst.py
@@ -0,0 +1,21 @@
+import distutils.command.bdist_wininst as orig
+
+
+class bdist_wininst(orig.bdist_wininst):
+    def reinitialize_command(self, command, reinit_subcommands=0):
+        """
+        Supplement reinitialize_command to work around
+        http://bugs.python.org/issue20819
+        """
+        cmd = self.distribution.reinitialize_command(
+            command, reinit_subcommands)
+        if command in ('install', 'install_lib'):
+            # Clear install_lib so it gets recomputed by finalize_options.
+            cmd.install_lib = None
+        return cmd
+
+    def run(self):
+        # NOTE(review): _is_running presumably lets other setuptools
+        # commands detect that they are being run as part of
+        # bdist_wininst — confirm against its consumers.
+        self._is_running = True
+        try:
+            orig.bdist_wininst.run(self)
+        finally:
+            self._is_running = False
diff --git a/setuptools/command/build_clib.py b/setuptools/command/build_clib.py
new file mode 100644
index 0000000..09caff6
--- /dev/null
+++ b/setuptools/command/build_clib.py
@@ -0,0 +1,98 @@
+import distutils.command.build_clib as orig
+from distutils.errors import DistutilsSetupError
+from distutils import log
+from setuptools.dep_util import newer_pairwise_group
+
+
+class build_clib(orig.build_clib):
+    """
+    Override the default build_clib behaviour to do the following:
+
+    1. Implement a rudimentary timestamp-based dependency system
+       so 'compile()' doesn't run every time.
+    2. Add more keys to the 'build_info' dictionary:
+        * obj_deps - specify dependencies for each object compiled.
+                     this should be a dictionary mapping a key
+                     with the source filename to a list of
+                     dependencies. Use an empty string for global
+                     dependencies.
+        * cflags   - specify a list of additional flags to pass to
+                     the compiler.
+    """
+
+    def build_libraries(self, libraries):
+        # libraries: list of (lib_name, build_info) tuples as passed to
+        # setup(libraries=...).  Each library is compiled (only if stale)
+        # and then archived into a static library.
+        for (lib_name, build_info) in libraries:
+            sources = build_info.get('sources')
+            if sources is None or not isinstance(sources, (list, tuple)):
+                raise DistutilsSetupError(
+                       "in 'libraries' option (library '%s'), "
+                       "'sources' must be present and must be "
+                       "a list of source filenames" % lib_name)
+            sources = list(sources)
+
+            log.info("building '%s' library", lib_name)
+
+            # Make sure everything is the correct type.
+            # obj_deps should be a dictionary of keys as sources
+            # and a list/tuple of files that are its dependencies.
+            obj_deps = build_info.get('obj_deps', dict())
+            if not isinstance(obj_deps, dict):
+                raise DistutilsSetupError(
+                       "in 'libraries' option (library '%s'), "
+                       "'obj_deps' must be a dictionary of "
+                       "type 'source: list'" % lib_name)
+            dependencies = []
+
+            # Get the global dependencies that are specified by the '' key.
+            # These will go into every source's dependency list.
+            global_deps = obj_deps.get('', list())
+            if not isinstance(global_deps, (list, tuple)):
+                raise DistutilsSetupError(
+                       "in 'libraries' option (library '%s'), "
+                       "'obj_deps' must be a dictionary of "
+                       "type 'source: list'" % lib_name)
+
+            # Build the list to be used by newer_pairwise_group
+            # each source will be auto-added to its dependencies.
+            for source in sources:
+                src_deps = [source]
+                src_deps.extend(global_deps)
+                extra_deps = obj_deps.get(source, list())
+                if not isinstance(extra_deps, (list, tuple)):
+                    raise DistutilsSetupError(
+                           "in 'libraries' option (library '%s'), "
+                           "'obj_deps' must be a dictionary of "
+                           "type 'source: list'" % lib_name)
+                src_deps.extend(extra_deps)
+                dependencies.append(src_deps)
+
+            expected_objects = self.compiler.object_filenames(
+                    sources,
+                    output_dir=self.build_temp
+                    )
+
+            # ([], []) means every object is newer than all of its
+            # dependencies, i.e. nothing is stale; otherwise recompile all
+            # sources for this library in one batch.
+            if newer_pairwise_group(dependencies, expected_objects) != ([], []):
+                # First, compile the source code to object files in the library
+                # directory.  (This should probably change to putting object
+                # files in a temporary build directory.)
+                macros = build_info.get('macros')
+                include_dirs = build_info.get('include_dirs')
+                cflags = build_info.get('cflags')
+                # NOTE(review): the returned 'objects' is unused below;
+                # create_static_lib uses 'expected_objects', which should be
+                # the same paths — presumably intentional, confirm upstream.
+                objects = self.compiler.compile(
+                        sources,
+                        output_dir=self.build_temp,
+                        macros=macros,
+                        include_dirs=include_dirs,
+                        extra_postargs=cflags,
+                        debug=self.debug
+                        )
+
+            # Now "link" the object files together into a static library.
+            # (On Unix at least, this isn't really linking -- it just
+            # builds an archive.  Whatever.)
+            self.compiler.create_static_lib(
+                    expected_objects,
+                    lib_name,
+                    output_dir=self.build_clib,
+                    debug=self.debug
+                    )
diff --git a/setuptools/command/build_ext.py b/setuptools/command/build_ext.py
new file mode 100644
index 0000000..ea97b37
--- /dev/null
+++ b/setuptools/command/build_ext.py
@@ -0,0 +1,331 @@
+import os
+import sys
+import itertools
+import imp
+from distutils.command.build_ext import build_ext as _du_build_ext
+from distutils.file_util import copy_file
+from distutils.ccompiler import new_compiler
+from distutils.sysconfig import customize_compiler, get_config_var
+from distutils.errors import DistutilsError
+from distutils import log
+
+from setuptools.extension import Library
+from setuptools.extern import six
+
+try:
+    # Attempt to use Cython for building extensions, if available
+    from Cython.Distutils.build_ext import build_ext as _build_ext
+    # Additionally, assert that the compiler module will load
+    # also. Ref #1229.
+    __import__('Cython.Compiler.Main')
+except ImportError:
+    _build_ext = _du_build_ext
+
+# make sure _config_vars is initialized
+get_config_var("LDSHARED")
+from distutils.sysconfig import _config_vars as _CONFIG_VARS
+
+
+def _customize_compiler_for_shlib(compiler):
+    """Run distutils' customize_compiler, with macOS overrides for .dylib.
+
+    On non-darwin platforms this is just customize_compiler(compiler).
+    On darwin, the module-global _CONFIG_VARS mapping is patched while
+    customize_compiler runs, then restored in a finally block so the
+    sysconfig state is never left modified.
+    """
+    if sys.platform == "darwin":
+        # building .dylib requires additional compiler flags on OSX; here we
+        # temporarily substitute the pyconfig.h variables so that distutils'
+        # 'customize_compiler' uses them before we build the shared libraries.
+        tmp = _CONFIG_VARS.copy()
+        try:
+            # XXX Help!  I don't have any idea whether these are right...
+            _CONFIG_VARS['LDSHARED'] = (
+                "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup")
+            _CONFIG_VARS['CCSHARED'] = " -dynamiclib"
+            _CONFIG_VARS['SO'] = ".dylib"
+            customize_compiler(compiler)
+        finally:
+            # Restore the original sysconfig variables (clear + update keeps
+            # the same dict object other modules may hold a reference to).
+            _CONFIG_VARS.clear()
+            _CONFIG_VARS.update(tmp)
+    else:
+        customize_compiler(compiler)
+
+
+# Platform feature detection for shared-library ("Library") extensions:
+#   have_rtld - the 'dl' module exposes RTLD_NOW (Python 2 Unix only),
+#               enabling dlopen-flag manipulation in generated stub loaders.
+#   use_stubs - a .py stub loader is written next to the shared lib.
+#   libtype   - 'shared' here; flipped to 'static' below on platforms
+#               where stubs are not used.
+have_rtld = False
+use_stubs = False
+libtype = 'shared'
+
+if sys.platform == "darwin":
+    use_stubs = True
+elif os.name != 'nt':
+    try:
+        # 'dl' only exists on some Python 2 Unix builds; absence is fine.
+        import dl
+        use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
+    except ImportError:
+        pass
+
+# Helper: include a stub-loader source line only when RTLD is available.
+if_dl = lambda s: s if have_rtld else ''
+
+
+def get_abi3_suffix():
+    """Return the file extension for an abi3-compliant Extension()
+
+    Scans imp.get_suffixes() for a C-extension suffix containing '.abi3'
+    (Unix) or equal to '.pyd' (Windows).  Returns None implicitly when the
+    interpreter offers no abi3 suffix.
+    """
+    for suffix, _, _ in (s for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION):
+        if '.abi3' in suffix:  # Unix
+            return suffix
+        elif suffix == '.pyd':  # Windows
+            return suffix
+
+
+class build_ext(_build_ext):
+    """Enhanced build_ext supporting setuptools.extension.Library
+    (shared-library "extensions"), abi3/py_limited_api filename handling,
+    --inplace copying, and generated stub loaders for dynamic libs."""
+
+    def run(self):
+        """Build extensions in build directory, then copy if --inplace"""
+        # Temporarily force inplace off so the base class builds into
+        # build_lib; then copy results back to the source tree ourselves.
+        old_inplace, self.inplace = self.inplace, 0
+        _build_ext.run(self)
+        self.inplace = old_inplace
+        if old_inplace:
+            self.copy_extensions_to_source()
+
+    def copy_extensions_to_source(self):
+        # Copy each built extension from build_lib into its package's
+        # source directory (the --inplace behavior).
+        build_py = self.get_finalized_command('build_py')
+        for ext in self.extensions:
+            fullname = self.get_ext_fullname(ext.name)
+            filename = self.get_ext_filename(fullname)
+            modpath = fullname.split('.')
+            package = '.'.join(modpath[:-1])
+            package_dir = build_py.get_package_dir(package)
+            dest_filename = os.path.join(package_dir,
+                                         os.path.basename(filename))
+            src_filename = os.path.join(self.build_lib, filename)
+
+            # Always copy, even if source is older than destination, to ensure
+            # that the right extensions for the current Python/platform are
+            # used.
+            copy_file(
+                src_filename, dest_filename, verbose=self.verbose,
+                dry_run=self.dry_run
+            )
+            if ext._needs_stub:
+                self.write_stub(package_dir or os.curdir, ext, True)
+
+    def get_ext_filename(self, fullname):
+        # Extends the distutils filename with three special cases:
+        # abi3 (py_limited_api) suffix, Library shared-lib naming, and
+        # 'dl-' prefixed names for stubbed dynamic extensions.
+        filename = _build_ext.get_ext_filename(self, fullname)
+        if fullname in self.ext_map:
+            ext = self.ext_map[fullname]
+            use_abi3 = (
+                six.PY3
+                and getattr(ext, 'py_limited_api')
+                and get_abi3_suffix()
+            )
+            if use_abi3:
+                # Swap the version-specific EXT_SUFFIX for the stable
+                # abi3 suffix.
+                so_ext = _get_config_var_837('EXT_SUFFIX')
+                filename = filename[:-len(so_ext)]
+                filename = filename + get_abi3_suffix()
+            if isinstance(ext, Library):
+                fn, ext = os.path.splitext(filename)
+                return self.shlib_compiler.library_filename(fn, libtype)
+            elif use_stubs and ext._links_to_dynamic:
+                d, fn = os.path.split(filename)
+                return os.path.join(d, 'dl-' + fn)
+        return filename
+
+    def initialize_options(self):
+        _build_ext.initialize_options(self)
+        # shlib_compiler: separate compiler configured for shared libraries.
+        # shlibs: the Library instances among self.extensions.
+        # ext_map: full name (and last component) -> extension instance.
+        self.shlib_compiler = None
+        self.shlibs = []
+        self.ext_map = {}
+
+    def finalize_options(self):
+        _build_ext.finalize_options(self)
+        self.extensions = self.extensions or []
+        self.check_extensions_list(self.extensions)
+        self.shlibs = [ext for ext in self.extensions
+                       if isinstance(ext, Library)]
+        if self.shlibs:
+            self.setup_shlib_compiler()
+        for ext in self.extensions:
+            ext._full_name = self.get_ext_fullname(ext.name)
+        for ext in self.extensions:
+            fullname = ext._full_name
+            self.ext_map[fullname] = ext
+
+            # distutils 3.1 will also ask for module names
+            # XXX what to do with conflicts?
+            self.ext_map[fullname.split('.')[-1]] = ext
+
+            # ltd: this extension links to a Library built in this dist.
+            # ns:  it needs a stub loader (dynamic link + stubs platform,
+            #      and it is not itself a Library).
+            ltd = self.shlibs and self.links_to_dynamic(ext) or False
+            ns = ltd and use_stubs and not isinstance(ext, Library)
+            ext._links_to_dynamic = ltd
+            ext._needs_stub = ns
+            filename = ext._file_name = self.get_ext_filename(fullname)
+            libdir = os.path.dirname(os.path.join(self.build_lib, filename))
+            if ltd and libdir not in ext.library_dirs:
+                ext.library_dirs.append(libdir)
+            if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
+                ext.runtime_library_dirs.append(os.curdir)
+
+    def setup_shlib_compiler(self):
+        # Build a second compiler, configured like the main one, dedicated
+        # to producing shared libraries for Library extensions.
+        compiler = self.shlib_compiler = new_compiler(
+            compiler=self.compiler, dry_run=self.dry_run, force=self.force
+        )
+        _customize_compiler_for_shlib(compiler)
+
+        if self.include_dirs is not None:
+            compiler.set_include_dirs(self.include_dirs)
+        if self.define is not None:
+            # 'define' option is a list of (name,value) tuples
+            for (name, value) in self.define:
+                compiler.define_macro(name, value)
+        if self.undef is not None:
+            for macro in self.undef:
+                compiler.undefine_macro(macro)
+        if self.libraries is not None:
+            compiler.set_libraries(self.libraries)
+        if self.library_dirs is not None:
+            compiler.set_library_dirs(self.library_dirs)
+        if self.rpath is not None:
+            compiler.set_runtime_library_dirs(self.rpath)
+        if self.link_objects is not None:
+            compiler.set_link_objects(self.link_objects)
+
+        # hack so distutils' build_extension() builds a library instead
+        compiler.link_shared_object = link_shared_object.__get__(compiler)
+
+    def get_export_symbols(self, ext):
+        # Libraries manage their own export list; plain extensions use the
+        # distutils default (exporting the module init symbol).
+        if isinstance(ext, Library):
+            return ext.export_symbols
+        return _build_ext.get_export_symbols(self, ext)
+
+    def build_extension(self, ext):
+        ext._convert_pyx_sources_to_lang()
+        # Swap in the shared-lib compiler for Library builds; restore the
+        # original compiler even if the build fails.
+        _compiler = self.compiler
+        try:
+            if isinstance(ext, Library):
+                self.compiler = self.shlib_compiler
+            _build_ext.build_extension(self, ext)
+            if ext._needs_stub:
+                cmd = self.get_finalized_command('build_py').build_lib
+                self.write_stub(cmd, ext)
+        finally:
+            self.compiler = _compiler
+
+    def links_to_dynamic(self, ext):
+        """Return true if 'ext' links to a dynamic lib in the same package"""
+        # XXX this should check to ensure the lib is actually being built
+        # XXX as dynamic, and not just using a locally-found version or a
+        # XXX static-compiled version
+        libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
+        pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
+        return any(pkg + libname in libnames for libname in ext.libraries)
+
+    def get_outputs(self):
+        # Include the generated stub loader files among the outputs.
+        return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
+
+    def __get_stubs_outputs(self):
+        # assemble the base name for each extension that needs a stub
+        ns_ext_bases = (
+            os.path.join(self.build_lib, *ext._full_name.split('.'))
+            for ext in self.extensions
+            if ext._needs_stub
+        )
+        # pair each base with the extension
+        pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
+        return list(base + fnext for base, fnext in pairs)
+
+    def __get_output_extensions(self):
+        yield '.py'
+        yield '.pyc'
+        if self.get_finalized_command('build_py').optimize:
+            yield '.pyo'
+
+    def write_stub(self, output_dir, ext, compile=False):
+        """Write a .py stub loader that imports the built extension at
+        runtime via pkg_resources; optionally byte-compile the stub and
+        delete the .py source afterwards."""
+        log.info("writing stub loader for %s to %s", ext._full_name,
+                 output_dir)
+        stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
+                     '.py')
+        if compile and os.path.exists(stub_file):
+            raise DistutilsError(stub_file + " already exists! Please delete.")
+        if not self.dry_run:
+            f = open(stub_file, 'w')
+            f.write(
+                '\n'.join([
+                    "def __bootstrap__():",
+                    "   global __bootstrap__, __file__, __loader__",
+                    "   import sys, os, pkg_resources, imp" + if_dl(", dl"),
+                    "   __file__ = pkg_resources.resource_filename"
+                    "(__name__,%r)"
+                    % os.path.basename(ext._file_name),
+                    "   del __bootstrap__",
+                    "   if '__loader__' in globals():",
+                    "       del __loader__",
+                    if_dl("   old_flags = sys.getdlopenflags()"),
+                    "   old_dir = os.getcwd()",
+                    "   try:",
+                    "     os.chdir(os.path.dirname(__file__))",
+                    if_dl("     sys.setdlopenflags(dl.RTLD_NOW)"),
+                    "     imp.load_dynamic(__name__,__file__)",
+                    "   finally:",
+                    if_dl("     sys.setdlopenflags(old_flags)"),
+                    "     os.chdir(old_dir)",
+                    "__bootstrap__()",
+                    ""  # terminal \n
+                ])
+            )
+            f.close()
+        if compile:
+            from distutils.util import byte_compile
+
+            byte_compile([stub_file], optimize=0,
+                         force=True, dry_run=self.dry_run)
+            optimize = self.get_finalized_command('install_lib').optimize
+            if optimize > 0:
+                byte_compile([stub_file], optimize=optimize,
+                             force=True, dry_run=self.dry_run)
+            # Only the byte-compiled stub is kept; remove the .py source.
+            if os.path.exists(stub_file) and not self.dry_run:
+                os.unlink(stub_file)
+
+
+# link_shared_object is bound onto the shlib compiler (see
+# setup_shlib_compiler) in place of the compiler's own method.  On
+# platforms with stub support (or Windows) it really links a shared
+# library; everywhere else it silently builds a static archive instead,
+# and 'libtype' is flipped so filenames match.
+if use_stubs or os.name == 'nt':
+    # Build shared libraries
+    #
+    def link_shared_object(
+            self, objects, output_libname, output_dir=None, libraries=None,
+            library_dirs=None, runtime_library_dirs=None, export_symbols=None,
+            debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
+            target_lang=None):
+        self.link(
+            self.SHARED_LIBRARY, objects, output_libname,
+            output_dir, libraries, library_dirs, runtime_library_dirs,
+            export_symbols, debug, extra_preargs, extra_postargs,
+            build_temp, target_lang
+        )
+else:
+    # Build static libraries everywhere else
+    libtype = 'static'
+
+    def link_shared_object(
+            self, objects, output_libname, output_dir=None, libraries=None,
+            library_dirs=None, runtime_library_dirs=None, export_symbols=None,
+            debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
+            target_lang=None):
+        # XXX we need to either disallow these attrs on Library instances,
+        # or warn/abort here if set, or something...
+        # libraries=None, library_dirs=None, runtime_library_dirs=None,
+        # export_symbols=None, extra_preargs=None, extra_postargs=None,
+        # build_temp=None
+
+        assert output_dir is None  # distutils build_ext doesn't pass this
+        output_dir, filename = os.path.split(output_libname)
+        basename, ext = os.path.splitext(filename)
+        if self.library_filename("x").startswith('lib'):
+            # strip 'lib' prefix; this is kludgy if some platform uses
+            # a different prefix
+            basename = basename[3:]
+
+        self.create_static_lib(
+            objects, basename, output_dir, debug, target_lang
+        )
+
+
+def _get_config_var_837(name):
+    """
+    In https://github.com/pypa/setuptools/pull/837, we discovered
+    Python 3.3.0 exposes the extension suffix under the name 'SO'.
+    """
+    # On interpreters older than 3.3.1, 'EXT_SUFFIX' does not exist;
+    # fall back to the legacy 'SO' config variable.
+    if sys.version_info < (3, 3, 1):
+        name = 'SO'
+    return get_config_var(name)
diff --git a/setuptools/command/build_py.py b/setuptools/command/build_py.py
new file mode 100644
index 0000000..b0314fd
--- /dev/null
+++ b/setuptools/command/build_py.py
@@ -0,0 +1,270 @@
+from glob import glob
+from distutils.util import convert_path
+import distutils.command.build_py as orig
+import os
+import fnmatch
+import textwrap
+import io
+import distutils.errors
+import itertools
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import map, filter, filterfalse
+
+try:
+    from setuptools.lib2to3_ex import Mixin2to3
+except ImportError:
+
+    # Fallback no-op mixin when lib2to3 support is unavailable, so
+    # build_py can unconditionally call run_2to3().
+    class Mixin2to3:
+        def run_2to3(self, files, doctests=True):
+            "do nothing"
+
+
+class build_py(orig.build_py, Mixin2to3):
+    """Enhanced 'build_py' command that includes data files with packages
+
+    The data files are specified via a 'package_data' argument to 'setup()'.
+    See 'setuptools.dist.Distribution' for more details.
+
+    Also, this version of the 'build_py' command allows you to specify both
+    'py_modules' and 'packages' in the same setup operation.
+    """
+
+    def finalize_options(self):
+        orig.build_py.finalize_options(self)
+        self.package_data = self.distribution.package_data
+        self.exclude_package_data = (self.distribution.exclude_package_data or
+                                     {})
+        # Drop a cached data_files value so __getattr__ recomputes it lazily.
+        if 'data_files' in self.__dict__:
+            del self.__dict__['data_files']
+        # Files copied this run (candidates for 2to3 conversion) and data
+        # files registered as doctests for 2to3.
+        self.__updated_files = []
+        self.__doctests_2to3 = []
+
+    def run(self):
+        """Build modules, packages, and copy data files to build directory"""
+        if not self.py_modules and not self.packages:
+            return
+
+        if self.py_modules:
+            self.build_modules()
+
+        if self.packages:
+            self.build_packages()
+            self.build_package_data()
+
+        # NOTE(review): __updated_files is run through 2to3 twice (once with
+        # doctests=False, once with True) — presumably intentional to convert
+        # both code and embedded doctests; confirm against upstream history.
+        self.run_2to3(self.__updated_files, False)
+        self.run_2to3(self.__updated_files, True)
+        self.run_2to3(self.__doctests_2to3, True)
+
+        # Only compile actual .py files, using our base class' idea of what our
+        # output files are.
+        self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
+
+    def __getattr__(self, attr):
+        "lazily compute data files"
+        # First access to 'data_files' computes and caches it on the
+        # instance, so subsequent accesses bypass __getattr__ entirely.
+        if attr == 'data_files':
+            self.data_files = self._get_data_files()
+            return self.data_files
+        return orig.build_py.__getattr__(self, attr)
+
+    def build_module(self, module, module_file, package):
+        if six.PY2 and isinstance(package, six.string_types):
+            # avoid errors on Python 2 when unicode is passed (#190)
+            package = package.split('.')
+        outfile, copied = orig.build_py.build_module(self, module, module_file,
+                                                     package)
+        # Track copied modules so run() can feed them to 2to3.
+        if copied:
+            self.__updated_files.append(outfile)
+        return outfile, copied
+
+    def _get_data_files(self):
+        """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
+        self.analyze_manifest()
+        return list(map(self._get_pkg_data_files, self.packages or ()))
+
+    def _get_pkg_data_files(self, package):
+        # Locate package source directory
+        src_dir = self.get_package_dir(package)
+
+        # Compute package build directory
+        build_dir = os.path.join(*([self.build_lib] + package.split('.')))
+
+        # Strip directory from globbed filenames
+        filenames = [
+            os.path.relpath(file, src_dir)
+            for file in self.find_data_files(package, src_dir)
+        ]
+        return package, src_dir, build_dir, filenames
+
+    def find_data_files(self, package, src_dir):
+        """Return filenames for package's data files in 'src_dir'"""
+        patterns = self._get_platform_patterns(
+            self.package_data,
+            package,
+            src_dir,
+        )
+        globs_expanded = map(glob, patterns)
+        # flatten the expanded globs into an iterable of matches
+        globs_matches = itertools.chain.from_iterable(globs_expanded)
+        glob_files = filter(os.path.isfile, globs_matches)
+        # Combine manifest-derived files with glob matches, then apply
+        # exclude_package_data filtering.
+        files = itertools.chain(
+            self.manifest_files.get(package, []),
+            glob_files,
+        )
+        return self.exclude_data_files(package, src_dir, files)
+
+    def build_package_data(self):
+        """Copy data files into build directory"""
+        for package, src_dir, build_dir, filenames in self.data_files:
+            for filename in filenames:
+                target = os.path.join(build_dir, filename)
+                self.mkpath(os.path.dirname(target))
+                srcfile = os.path.join(src_dir, filename)
+                outf, copied = self.copy_file(srcfile, target)
+                srcfile = os.path.abspath(srcfile)
+                # Data files registered as 2to3 doctests get converted too.
+                if (copied and
+                        srcfile in self.distribution.convert_2to3_doctests):
+                    self.__doctests_2to3.append(outf)
+
+    def analyze_manifest(self):
+        """Populate self.manifest_files: package -> list of data-file paths
+        found via egg_info's file list (only when include_package_data)."""
+        self.manifest_files = mf = {}
+        if not self.distribution.include_package_data:
+            return
+        src_dirs = {}
+        for package in self.packages or ():
+            # Locate package source directory
+            src_dirs[assert_relative(self.get_package_dir(package))] = package
+
+        self.run_command('egg_info')
+        ei_cmd = self.get_finalized_command('egg_info')
+        for path in ei_cmd.filelist.files:
+            d, f = os.path.split(assert_relative(path))
+            prev = None
+            oldf = f
+            # Walk up the directory tree until we hit a known package dir
+            # (or the root), rebuilding the package-relative filename.
+            while d and d != prev and d not in src_dirs:
+                prev = d
+                d, df = os.path.split(d)
+                f = os.path.join(df, f)
+            if d in src_dirs:
+                if path.endswith('.py') and f == oldf:
+                    continue  # it's a module, not data
+                mf.setdefault(src_dirs[d], []).append(path)
+
+    def get_data_files(self):
+        pass  # Lazily compute data files in _get_data_files() function.
+
+    def check_package(self, package, package_dir):
+        """Check namespace packages' __init__ for declare_namespace"""
+        # Cache per-package results so each package is checked once.
+        try:
+            return self.packages_checked[package]
+        except KeyError:
+            pass
+
+        init_py = orig.build_py.check_package(self, package, package_dir)
+        self.packages_checked[package] = init_py
+
+        if not init_py or not self.distribution.namespace_packages:
+            return init_py
+
+        # Only enforce the declare_namespace requirement for packages that
+        # are (or contain) a declared namespace package.
+        for pkg in self.distribution.namespace_packages:
+            if pkg == package or pkg.startswith(package + '.'):
+                break
+        else:
+            return init_py
+
+        with io.open(init_py, 'rb') as f:
+            contents = f.read()
+        if b'declare_namespace' not in contents:
+            raise distutils.errors.DistutilsError(
+                "Namespace package problem: %s is a namespace package, but "
+                "its\n__init__.py does not call declare_namespace()! Please "
+                'fix it.\n(See the setuptools manual under '
+                '"Namespace Packages" for details.)\n"' % (package,)
+            )
+        return init_py
+
+    def initialize_options(self):
+        self.packages_checked = {}
+        orig.build_py.initialize_options(self)
+
+    def get_package_dir(self, package):
+        res = orig.build_py.get_package_dir(self, package)
+        # Honor an alternate source root (used by sdist-from-subdir setups).
+        if self.distribution.src_root is not None:
+            return os.path.join(self.distribution.src_root, res)
+        return res
+
+    def exclude_data_files(self, package, src_dir, files):
+        """Filter filenames for package's data files in 'src_dir'"""
+        files = list(files)
+        patterns = self._get_platform_patterns(
+            self.exclude_package_data,
+            package,
+            src_dir,
+        )
+        match_groups = (
+            fnmatch.filter(files, pattern)
+            for pattern in patterns
+        )
+        # flatten the groups of matches into an iterable of matches
+        matches = itertools.chain.from_iterable(match_groups)
+        bad = set(matches)
+        keepers = (
+            fn
+            for fn in files
+            if fn not in bad
+        )
+        # ditch dupes
+        return list(_unique_everseen(keepers))
+
+    @staticmethod
+    def _get_platform_patterns(spec, package, src_dir):
+        """
+        yield platform-specific path patterns (suitable for glob
+        or fn_match) from a glob-based spec (such as
+        self.package_data or self.exclude_package_data)
+        matching package in src_dir.
+        """
+        raw_patterns = itertools.chain(
+            spec.get('', []),
+            spec.get(package, []),
+        )
+        return (
+            # Each pattern has to be converted to a platform-specific path
+            os.path.join(src_dir, convert_path(pattern))
+            for pattern in raw_patterns
+        )
+
+
+# from Python docs
+# from Python docs
+def _unique_everseen(iterable, key=None):
+    "List unique elements, preserving order. Remember all elements ever seen."
+    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
+    # unique_everseen('ABBCcAD', str.lower) --> A B C D
+    seen = set()
+    # Bind the method once; micro-optimization from the itertools recipe.
+    seen_add = seen.add
+    if key is None:
+        for element in filterfalse(seen.__contains__, iterable):
+            seen_add(element)
+            yield element
+    else:
+        for element in iterable:
+            k = key(element)
+            if k not in seen:
+                seen_add(k)
+                yield element
+
+
+def assert_relative(path):
+    """Return *path* unchanged if it is relative; otherwise raise
+    DistutilsSetupError explaining that setup() paths must be relative."""
+    if not os.path.isabs(path):
+        return path
+    # Imported locally: only needed on the (rare) error path.
+    from distutils.errors import DistutilsSetupError
+
+    msg = textwrap.dedent("""
+        Error: setup script specifies an absolute path:
+
+            %s
+
+        setup() arguments must *always* be /-separated paths relative to the
+        setup.py directory, *never* absolute paths.
+        """).lstrip() % path
+    raise DistutilsSetupError(msg)
diff --git a/setuptools/command/develop.py b/setuptools/command/develop.py
new file mode 100755
index 0000000..959c932
--- /dev/null
+++ b/setuptools/command/develop.py
@@ -0,0 +1,216 @@
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsError, DistutilsOptionError
+import os
+import glob
+import io
+
+from setuptools.extern import six
+
+from pkg_resources import Distribution, PathMetadata, normalize_path
+from setuptools.command.easy_install import easy_install
+from setuptools import namespaces
+import setuptools
+
+
class develop(namespaces.DevelopInstaller, easy_install):
    """Set up package for development"""

    description = "install package in 'development mode'"

    # Extend easy_install's options with develop-specific ones.
    user_options = easy_install.user_options + [
        ("uninstall", "u", "Uninstall this source package"),
        ("egg-path=", None, "Set the path to be used in the .egg-link file"),
    ]

    boolean_options = easy_install.boolean_options + ['uninstall']

    command_consumes_arguments = False  # override base

    def run(self):
        """With -u/--uninstall remove the egg-link and namespace stubs;
        otherwise install the project in development (in-place) mode."""
        if self.uninstall:
            self.multi_version = True
            self.uninstall_link()
            self.uninstall_namespaces()
        else:
            self.install_for_development()
        self.warn_deprecated_options()

    def initialize_options(self):
        """Set develop-specific defaults, then delegate to easy_install."""
        self.uninstall = None
        self.egg_path = None
        easy_install.initialize_options(self)
        self.setup_path = None
        self.always_copy_from = '.'  # always copy eggs installed in curdir

    def finalize_options(self):
        """Validate egg-info, compute the .egg-link location, and build
        a Distribution describing the in-place project source."""
        ei = self.get_finalized_command("egg_info")
        if ei.broken_egg_info:
            template = "Please rename %r to %r before using 'develop'"
            args = ei.egg_info, ei.broken_egg_info
            raise DistutilsError(template % args)
        self.args = [ei.egg_name]

        easy_install.finalize_options(self)
        self.expand_basedirs()
        self.expand_dirs()
        # pick up setup-dir .egg files only: no .egg-info
        self.package_index.scan(glob.glob('*.egg'))

        egg_link_fn = ei.egg_name + '.egg-link'
        self.egg_link = os.path.join(self.install_dir, egg_link_fn)
        self.egg_base = ei.egg_base
        if self.egg_path is None:
            self.egg_path = os.path.abspath(ei.egg_base)

        # The path written to the egg-link must resolve back to the
        # project's egg base when joined with the install directory.
        target = normalize_path(self.egg_base)
        egg_path = normalize_path(os.path.join(self.install_dir,
                                               self.egg_path))
        if egg_path != target:
            raise DistutilsOptionError(
                "--egg-path must be a relative path from the install"
                " directory to " + target
            )

        # Make a distribution for the package's source
        self.dist = Distribution(
            target,
            PathMetadata(target, os.path.abspath(ei.egg_info)),
            project_name=ei.egg_name
        )

        self.setup_path = self._resolve_setup_path(
            self.egg_base,
            self.install_dir,
            self.egg_path,
        )

    @staticmethod
    def _resolve_setup_path(egg_base, install_dir, egg_path):
        """
        Generate a path from egg_base back to '.' where the
        setup script resides and ensure that path points to the
        setup path from $install_dir/$egg_path.
        """
        path_to_setup = egg_base.replace(os.sep, '/').rstrip('/')
        if path_to_setup != os.curdir:
            # one '../' per path component of egg_base
            path_to_setup = '../' * (path_to_setup.count('/') + 1)
        resolved = normalize_path(
            os.path.join(install_dir, egg_path, path_to_setup)
        )
        if resolved != normalize_path(os.curdir):
            raise DistutilsOptionError(
                "Can't get a consistent path to setup script from"
                " installation directory", resolved, normalize_path(os.curdir))
        return path_to_setup

    def install_for_development(self):
        """Build in-place (or via the build dir when 2to3 is in use) and
        write the .egg-link plus .pth entries pointing at the source."""
        if six.PY3 and getattr(self.distribution, 'use_2to3', False):
            # If we run 2to3 we can not do this inplace:

            # Ensure metadata is up-to-date
            self.reinitialize_command('build_py', inplace=0)
            self.run_command('build_py')
            bpy_cmd = self.get_finalized_command("build_py")
            build_path = normalize_path(bpy_cmd.build_lib)

            # Build extensions
            self.reinitialize_command('egg_info', egg_base=build_path)
            self.run_command('egg_info')

            self.reinitialize_command('build_ext', inplace=0)
            self.run_command('build_ext')

            # Fixup egg-link and easy-install.pth
            ei_cmd = self.get_finalized_command("egg_info")
            self.egg_path = build_path
            self.dist.location = build_path
            # XXX
            self.dist._provider = PathMetadata(build_path, ei_cmd.egg_info)
        else:
            # Without 2to3 inplace works fine:
            self.run_command('egg_info')

            # Build extensions in-place
            self.reinitialize_command('build_ext', inplace=1)
            self.run_command('build_ext')

        self.install_site_py()  # ensure that target dir is site-safe
        if setuptools.bootstrap_install_from:
            self.easy_install(setuptools.bootstrap_install_from)
            setuptools.bootstrap_install_from = None

        self.install_namespaces()

        # create an .egg-link in the installation dir, pointing to our egg
        log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
        if not self.dry_run:
            with open(self.egg_link, "w") as f:
                f.write(self.egg_path + "\n" + self.setup_path)
        # postprocess the installed distro, fixing up .pth, installing scripts,
        # and handling requirements
        self.process_distribution(None, self.dist, not self.no_deps)

    def uninstall_link(self):
        """Remove the .egg-link (after sanity-checking its contents) and
        the corresponding easy-install.pth entry."""
        if os.path.exists(self.egg_link):
            log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
            # NOTE(review): handle is closed explicitly rather than via a
            # context manager; consider `with` in a future change.
            egg_link_file = open(self.egg_link)
            contents = [line.rstrip() for line in egg_link_file]
            egg_link_file.close()
            if contents not in ([self.egg_path],
                                [self.egg_path, self.setup_path]):
                log.warn("Link points to %s: uninstall aborted", contents)
                return
            if not self.dry_run:
                os.unlink(self.egg_link)
        if not self.dry_run:
            self.update_pth(self.dist)  # remove any .pth link to us
        if self.distribution.scripts:
            # XXX should also check for entry point scripts!
            log.warn("Note: you must uninstall or replace scripts manually!")

    def install_egg_scripts(self, dist):
        """Install scripts; for our own in-place dist, install directly
        from the source script files."""
        if dist is not self.dist:
            # Installing a dependency, so fall back to normal behavior
            return easy_install.install_egg_scripts(self, dist)

        # create wrapper scripts in the script dir, pointing to dist.scripts

        # new-style...
        self.install_wrapper_scripts(dist)

        # ...and old-style
        for script_name in self.distribution.scripts or []:
            script_path = os.path.abspath(convert_path(script_name))
            script_name = os.path.basename(script_path)
            with io.open(script_path) as strm:
                script_text = strm.read()
            self.install_script(dist, script_name, script_text, script_path)

    def install_wrapper_scripts(self, dist):
        # Wrap the dist so generated scripts require only the project
        # name, not a pinned version.
        dist = VersionlessRequirement(dist)
        return easy_install.install_wrapper_scripts(self, dist)
+
+
class VersionlessRequirement(object):
    """
    Proxy for a pkg_resources.Distribution whose requirement string
    is just the project name — no version pin — so that generated
    scripts keep working across multiple installed versions.

    >>> dist = Distribution(project_name='foo', version='1.0')
    >>> str(dist.as_requirement())
    'foo==1.0'
    >>> adapted_dist = VersionlessRequirement(dist)
    >>> str(adapted_dist.as_requirement())
    'foo'
    """

    def __init__(self, dist):
        self.__dist = dist

    def __getattr__(self, name):
        # Everything other than as_requirement is delegated to the
        # wrapped distribution.
        return getattr(self.__dist, name)

    def as_requirement(self):
        # Project name only — deliberately omits the '==version' pin.
        return self.project_name
diff --git a/setuptools/command/dist_info.py b/setuptools/command/dist_info.py
new file mode 100644
index 0000000..c45258f
--- /dev/null
+++ b/setuptools/command/dist_info.py
@@ -0,0 +1,36 @@
+"""
+Create a dist_info directory
+As defined in the wheel specification
+"""
+
+import os
+
+from distutils.core import Command
+from distutils import log
+
+
class dist_info(Command):
    """distutils command that creates a .dist-info directory (as
    defined by the wheel specification) from generated egg-info."""

    description = 'create a .dist-info directory'

    user_options = [
        ('egg-base=', 'e', "directory containing .egg-info directories"
                           " (default: top of the source tree)"),
    ]

    def initialize_options(self):
        # Directory in which the intermediate .egg-info is generated.
        self.egg_base = None

    def finalize_options(self):
        # No derived options to compute.
        pass

    def run(self):
        # Generate egg-info first, then convert it to dist-info using
        # wheel's bdist_wheel.egg2dist helper.
        egg_info = self.get_finalized_command('egg_info')
        egg_info.egg_base = self.egg_base
        egg_info.finalize_options()
        egg_info.run()
        # Swap the '.egg-info' suffix for '.dist-info'.
        dist_info_dir = egg_info.egg_info[:-len('.egg-info')] + '.dist-info'
        log.info("creating '{}'".format(os.path.abspath(dist_info_dir)))

        bdist_wheel = self.get_finalized_command('bdist_wheel')
        bdist_wheel.egg2dist(egg_info.egg_info, dist_info_dir)
diff --git a/setuptools/command/easy_install.py b/setuptools/command/easy_install.py
new file mode 100755
index 0000000..85ee40f
--- /dev/null
+++ b/setuptools/command/easy_install.py
@@ -0,0 +1,2334 @@
+#!/usr/bin/env python
+"""
+Easy Install
+------------
+
+A tool for doing automatic download/extract/build of distutils-based Python
+packages.  For detailed documentation, see the accompanying EasyInstall.txt
+file, or visit the `EasyInstall home page`__.
+
+__ https://setuptools.readthedocs.io/en/latest/easy_install.html
+
+"""
+
+from glob import glob
+from distutils.util import get_platform
+from distutils.util import convert_path, subst_vars
+from distutils.errors import (
+    DistutilsArgError, DistutilsOptionError,
+    DistutilsError, DistutilsPlatformError,
+)
+from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
+from distutils import log, dir_util
+from distutils.command.build_scripts import first_line_re
+from distutils.spawn import find_executable
+import sys
+import os
+import zipimport
+import shutil
+import tempfile
+import zipfile
+import re
+import stat
+import random
+import textwrap
+import warnings
+import site
+import struct
+import contextlib
+import subprocess
+import shlex
+import io
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import configparser, map
+
+from setuptools import Command
+from setuptools.sandbox import run_setup
+from setuptools.py31compat import get_path, get_config_vars
+from setuptools.py27compat import rmtree_safe
+from setuptools.command import setopt
+from setuptools.archive_util import unpack_archive
+from setuptools.package_index import (
+    PackageIndex, parse_requirement_arg, URL_SCHEME,
+)
+from setuptools.command import bdist_egg, egg_info
+from setuptools.wheel import Wheel
+from pkg_resources import (
+    yield_lines, normalize_path, resource_string, ensure_directory,
+    get_distribution, find_distributions, Environment, Requirement,
+    Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
+    VersionConflict, DEVELOP_DIST,
+)
+import pkg_resources.py31compat
+
+# Turn on PEP440Warnings
+warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
+
+__all__ = [
+    'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
+    'main', 'get_exe_prefixes',
+]
+
+
def is_64bit():
    """Return True when the running interpreter uses 64-bit pointers."""
    return struct.calcsize("P") * 8 == 64
+
+
def samefile(p1, p2):
    """
    Determine if two paths reference the same file.

    Augments os.path.samefile to work on Windows and
    suppresses errors if the path doesn't exist.
    """
    if hasattr(os.path, 'samefile') and os.path.exists(p1) and os.path.exists(p2):
        # Both exist and the platform supports an exact check.
        return os.path.samefile(p1, p2)
    # Fall back to a normalized textual comparison.
    return (os.path.normpath(os.path.normcase(p1)) ==
            os.path.normpath(os.path.normcase(p2)))
+
+
if six.PY2:

    def _to_ascii(s):
        # Strings are already byte strings under Python 2.
        return s

    def isascii(s):
        """Return True if *s* decodes cleanly as ASCII."""
        try:
            six.text_type(s, 'ascii')
        except UnicodeError:
            return False
        return True
else:

    def _to_ascii(s):
        # Encode text to ASCII bytes under Python 3.
        return s.encode('ascii')

    def isascii(s):
        """Return True if *s* encodes cleanly as ASCII."""
        try:
            s.encode('ascii')
        except UnicodeError:
            return False
        return True
+
+
+_one_liner = lambda text: textwrap.dedent(text).strip().replace('\n', '; ')
+
+
class easy_install(Command):
    """Manage a download/build/install process"""
    description = "Find/get/install Python packages"
    # Positional arguments (requirement specs/URLs/files) are consumed
    # by this command rather than treated as further commands.
    command_consumes_arguments = True

    user_options = [
        ('prefix=', None, "installation prefix"),
        ("zip-ok", "z", "install package as a zipfile"),
        ("multi-version", "m", "make apps have to require() a version"),
        ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
        ("install-dir=", "d", "install package to DIR"),
        ("script-dir=", "s", "install scripts to DIR"),
        ("exclude-scripts", "x", "Don't install scripts"),
        ("always-copy", "a", "Copy all needed packages to install dir"),
        ("index-url=", "i", "base URL of Python Package Index"),
        ("find-links=", "f", "additional URL(s) to search for packages"),
        ("build-directory=", "b",
         "download/extract/build in DIR; keep the results"),
        ('optimize=', 'O',
         "also compile with optimization: -O1 for \"python -O\", "
         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
        ('record=', None,
         "filename in which to record list of installed files"),
        ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
        ('site-dirs=', 'S', "list of directories where .pth files work"),
        ('editable', 'e', "Install specified packages in editable form"),
        ('no-deps', 'N', "don't install dependencies"),
        ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
        ('local-snapshots-ok', 'l',
         "allow building eggs from local checkouts"),
        ('version', None, "print version information and exit"),
        ('no-find-links', None,
         "Don't load find-links defined in packages being installed")
    ]
    boolean_options = [
        'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
        'editable',
        'no-deps', 'local-snapshots-ok', 'version'
    ]

    if site.ENABLE_USER_SITE:
        # Only offer --user when the interpreter supports user
        # site-packages directories.
        help_msg = "install in user site-package '%s'" % site.USER_SITE
        user_options.append(('user', None, help_msg))
        boolean_options.append('user')

    # --always-unzip is the negation of --zip-ok.
    negative_opt = {'always-unzip': 'zip-ok'}
    # Factory for the package index; subclasses/tests may override.
    create_index = PackageIndex
+
    def initialize_options(self):
        """Declare defaults for every supported option and internal field."""
        # the --user option seems to be an opt-in one,
        # so the default should be False.
        self.user = 0
        self.zip_ok = self.local_snapshots_ok = None
        self.install_dir = self.script_dir = self.exclude_scripts = None
        self.index_url = None
        self.find_links = None
        self.build_directory = None
        self.args = None
        self.optimize = self.record = None
        self.upgrade = self.always_copy = self.multi_version = None
        self.editable = self.no_deps = self.allow_hosts = None
        self.root = self.prefix = self.no_report = None
        self.version = None
        self.install_purelib = None  # for pure module distributions
        self.install_platlib = None  # non-pure (dists w/ extensions)
        self.install_headers = None  # for C/C++ headers
        self.install_lib = None  # set to either purelib or platlib
        self.install_scripts = None
        self.install_data = None
        self.install_base = None
        self.install_platbase = None
        if site.ENABLE_USER_SITE:
            self.install_userbase = site.USER_BASE
            self.install_usersite = site.USER_SITE
        else:
            self.install_userbase = None
            self.install_usersite = None
        self.no_find_links = None

        # Options not specifiable via command line
        self.package_index = None
        self.pth_file = self.always_copy_from = None
        self.site_dirs = None
        self.installed_projects = {}
        self.sitepy_installed = False
        # Always read easy_install options, even if we are subclassed, or have
        # an independent instance created.  This ensures that defaults will
        # always come from the standard configuration file(s)' "easy_install"
        # section, even if this is a "develop" or "install" command, or some
        # other embedding.
        self._dry_run = None
        self.verbose = self.distribution.verbose
        self.distribution._set_command_options(
            self, self.distribution.get_option_dict('easy_install')
        )
+
+    def delete_blockers(self, blockers):
+        extant_blockers = (
+            filename for filename in blockers
+            if os.path.exists(filename) or os.path.islink(filename)
+        )
+        list(map(self._delete_path, extant_blockers))
+
+    def _delete_path(self, path):
+        log.info("Deleting %s", path)
+        if self.dry_run:
+            return
+
+        is_tree = os.path.isdir(path) and not os.path.islink(path)
+        remover = rmtree if is_tree else os.unlink
+        remover(path)
+
+    @staticmethod
+    def _render_version():
+        """
+        Render the Setuptools version and installation details, then exit.
+        """
+        ver = sys.version[:3]
+        dist = get_distribution('setuptools')
+        tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
+        print(tmpl.format(**locals()))
+        raise SystemExit()
+
+    def finalize_options(self):
+        self.version and self._render_version()
+
+        py_version = sys.version.split()[0]
+        prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
+
+        self.config_vars = {
+            'dist_name': self.distribution.get_name(),
+            'dist_version': self.distribution.get_version(),
+            'dist_fullname': self.distribution.get_fullname(),
+            'py_version': py_version,
+            'py_version_short': py_version[0:3],
+            'py_version_nodot': py_version[0] + py_version[2],
+            'sys_prefix': prefix,
+            'prefix': prefix,
+            'sys_exec_prefix': exec_prefix,
+            'exec_prefix': exec_prefix,
+            # Only python 3.2+ has abiflags
+            'abiflags': getattr(sys, 'abiflags', ''),
+        }
+
+        if site.ENABLE_USER_SITE:
+            self.config_vars['userbase'] = self.install_userbase
+            self.config_vars['usersite'] = self.install_usersite
+
+        self._fix_install_dir_for_user_site()
+
+        self.expand_basedirs()
+        self.expand_dirs()
+
+        self._expand(
+            'install_dir', 'script_dir', 'build_directory',
+            'site_dirs',
+        )
+        # If a non-default installation directory was specified, default the
+        # script directory to match it.
+        if self.script_dir is None:
+            self.script_dir = self.install_dir
+
+        if self.no_find_links is None:
+            self.no_find_links = False
+
+        # Let install_dir get set by install_lib command, which in turn
+        # gets its info from the install command, and takes into account
+        # --prefix and --home and all that other crud.
+        self.set_undefined_options(
+            'install_lib', ('install_dir', 'install_dir')
+        )
+        # Likewise, set default script_dir from 'install_scripts.install_dir'
+        self.set_undefined_options(
+            'install_scripts', ('install_dir', 'script_dir')
+        )
+
+        if self.user and self.install_purelib:
+            self.install_dir = self.install_purelib
+            self.script_dir = self.install_scripts
+        # default --record from the install command
+        self.set_undefined_options('install', ('record', 'record'))
+        # Should this be moved to the if statement below? It's not used
+        # elsewhere
+        normpath = map(normalize_path, sys.path)
+        self.all_site_dirs = get_site_dirs()
+        if self.site_dirs is not None:
+            site_dirs = [
+                os.path.expanduser(s.strip()) for s in
+                self.site_dirs.split(',')
+            ]
+            for d in site_dirs:
+                if not os.path.isdir(d):
+                    log.warn("%s (in --site-dirs) does not exist", d)
+                elif normalize_path(d) not in normpath:
+                    raise DistutilsOptionError(
+                        d + " (in --site-dirs) is not on sys.path"
+                    )
+                else:
+                    self.all_site_dirs.append(normalize_path(d))
+        if not self.editable:
+            self.check_site_dir()
+        self.index_url = self.index_url or "https://pypi.org/simple/"
+        self.shadow_path = self.all_site_dirs[:]
+        for path_item in self.install_dir, normalize_path(self.script_dir):
+            if path_item not in self.shadow_path:
+                self.shadow_path.insert(0, path_item)
+
+        if self.allow_hosts is not None:
+            hosts = [s.strip() for s in self.allow_hosts.split(',')]
+        else:
+            hosts = ['*']
+        if self.package_index is None:
+            self.package_index = self.create_index(
+                self.index_url, search_path=self.shadow_path, hosts=hosts,
+            )
+        self.local_index = Environment(self.shadow_path + sys.path)
+
+        if self.find_links is not None:
+            if isinstance(self.find_links, six.string_types):
+                self.find_links = self.find_links.split()
+        else:
+            self.find_links = []
+        if self.local_snapshots_ok:
+            self.package_index.scan_egg_links(self.shadow_path + sys.path)
+        if not self.no_find_links:
+            self.package_index.add_find_links(self.find_links)
+        self.set_undefined_options('install_lib', ('optimize', 'optimize'))
+        if not isinstance(self.optimize, int):
+            try:
+                self.optimize = int(self.optimize)
+                if not (0 <= self.optimize <= 2):
+                    raise ValueError
+            except ValueError:
+                raise DistutilsOptionError("--optimize must be 0, 1, or 2")
+
+        if self.editable and not self.build_directory:
+            raise DistutilsArgError(
+                "Must specify a build directory (-b) when using --editable"
+            )
+        if not self.args:
+            raise DistutilsArgError(
+                "No urls, filenames, or requirements specified (see --help)")
+
+        self.outputs = []
+
+    def _fix_install_dir_for_user_site(self):
+        """
+        Fix the install_dir if "--user" was used.
+        """
+        if not self.user or not site.ENABLE_USER_SITE:
+            return
+
+        self.create_home_path()
+        if self.install_userbase is None:
+            msg = "User base directory is not specified"
+            raise DistutilsPlatformError(msg)
+        self.install_base = self.install_platbase = self.install_userbase
+        scheme_name = os.name.replace('posix', 'unix') + '_user'
+        self.select_scheme(scheme_name)
+
+    def _expand_attrs(self, attrs):
+        for attr in attrs:
+            val = getattr(self, attr)
+            if val is not None:
+                if os.name == 'posix' or os.name == 'nt':
+                    val = os.path.expanduser(val)
+                val = subst_vars(val, self.config_vars)
+                setattr(self, attr, val)
+
+    def expand_basedirs(self):
+        """Calls `os.path.expanduser` on install_base, install_platbase and
+        root."""
+        self._expand_attrs(['install_base', 'install_platbase', 'root'])
+
+    def expand_dirs(self):
+        """Calls `os.path.expanduser` on install dirs."""
+        dirs = [
+            'install_purelib',
+            'install_platlib',
+            'install_lib',
+            'install_headers',
+            'install_scripts',
+            'install_data',
+        ]
+        self._expand_attrs(dirs)
+
    def run(self):
        """Install each requested spec, then optionally write the
        --record file listing everything installed."""
        if self.verbose != self.distribution.verbose:
            log.set_verbosity(self.verbose)
        try:
            for spec in self.args:
                self.easy_install(spec, not self.no_deps)
            if self.record:
                outputs = self.outputs
                if self.root:  # strip any package prefix
                    root_len = len(self.root)
                    for counter in range(len(outputs)):
                        outputs[counter] = outputs[counter][root_len:]
                from distutils import file_util

                self.execute(
                    file_util.write_file, (self.record, outputs),
                    "writing list of installed files to '%s'" %
                    self.record
                )
            self.warn_deprecated_options()
        finally:
            # Restore the distribution's verbosity even on failure.
            log.set_verbosity(self.distribution.verbose)
+
+    def pseudo_tempname(self):
+        """Return a pseudo-tempname base in the install directory.
+        This code is intentionally naive; if a malicious party can write to
+        the target directory you're already in deep doodoo.
+        """
+        try:
+            pid = os.getpid()
+        except Exception:
+            pid = random.randint(0, sys.maxsize)
+        return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
+
+    def warn_deprecated_options(self):
+        pass
+
    def check_site_dir(self):
        """Verify that self.install_dir is .pth-capable dir, if needed"""

        instdir = normalize_path(self.install_dir)
        pth_file = os.path.join(instdir, 'easy-install.pth')

        # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
        is_site_dir = instdir in self.all_site_dirs

        if not is_site_dir and not self.multi_version:
            # No?  Then directly test whether it does .pth file processing
            is_site_dir = self.check_pth_processing()
        else:
            # make sure we can write to target dir
            testfile = self.pseudo_tempname() + '.write-test'
            test_exists = os.path.exists(testfile)
            try:
                if test_exists:
                    os.unlink(testfile)
                open(testfile, 'w').close()
                os.unlink(testfile)
            except (OSError, IOError):
                self.cant_write_to_target()

        if not is_site_dir and not self.multi_version:
            # Can't install non-multi to non-site dir
            raise DistutilsError(self.no_default_version_msg())

        if is_site_dir:
            # Lazily create the PthDistributions manager for the dir.
            if self.pth_file is None:
                self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
        else:
            self.pth_file = None

        if instdir not in map(normalize_path, _pythonpath()):
            # only PYTHONPATH dirs need a site.py, so pretend it's there
            self.sitepy_installed = True
        elif self.multi_version and not os.path.exists(pth_file):
            self.sitepy_installed = True  # don't need site.py in this case
            self.pth_file = None  # and don't create a .pth file
        self.install_dir = instdir
+
    # Error template shown when the install directory can't be written;
    # filled with (original OS error, install directory).
    __cant_write_msg = textwrap.dedent("""
        can't create or remove files in install directory

        The following error occurred while trying to add or remove files in the
        installation directory:

            %s

        The installation directory you specified (via --install-dir, --prefix, or
        the distutils default setting) was:

            %s
        """).lstrip()

    # Appended to the message when the install directory doesn't exist.
    __not_exists_id = textwrap.dedent("""
        This directory does not currently exist.  Please create it and try again, or
        choose a different installation directory (using the -d or --install-dir
        option).
        """).lstrip()

    # Appended when the directory exists but appears to be unwritable.
    __access_msg = textwrap.dedent("""
        Perhaps your account does not have write access to this directory?  If the
        installation directory is a system-owned directory, you may need to sign in
        as the administrator or "root" account.  If you do not have administrative
        access to this machine, you may wish to choose a different installation
        directory, preferably one that is listed in your PYTHONPATH environment
        variable.

        For information on other options, you may wish to consult the
        documentation at:

          https://setuptools.readthedocs.io/en/latest/easy_install.html

        Please make the appropriate changes for your system and try again.
        """).lstrip()
+
+    def cant_write_to_target(self):
+        msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
+
+        if not os.path.exists(self.install_dir):
+            msg += '\n' + self.__not_exists_id
+        else:
+            msg += '\n' + self.__access_msg
+        raise DistutilsError(msg)
+
    def check_pth_processing(self):
        """Empirically verify whether .pth files are supported in inst. dir"""
        instdir = self.install_dir
        log.info("Checking .pth file support in %s", instdir)
        pth_file = self.pseudo_tempname() + ".pth"
        ok_file = pth_file + '.ok'
        ok_exists = os.path.exists(ok_file)
        # The probe .pth writes a marker file when a subinterpreter
        # processes it at startup.
        tmpl = _one_liner("""
            import os
            f = open({ok_file!r}, 'w')
            f.write('OK')
            f.close()
            """) + '\n'
        try:
            if ok_exists:
                os.unlink(ok_file)
            dirname = os.path.dirname(ok_file)
            pkg_resources.py31compat.makedirs(dirname, exist_ok=True)
            f = open(pth_file, 'w')
        except (OSError, IOError):
            self.cant_write_to_target()
        else:
            try:
                f.write(tmpl.format(**locals()))
                f.close()
                f = None
                executable = sys.executable
                if os.name == 'nt':
                    dirname, basename = os.path.split(executable)
                    alt = os.path.join(dirname, 'pythonw.exe')
                    use_alt = (
                        basename.lower() == 'python.exe' and
                        os.path.exists(alt)
                    )
                    if use_alt:
                        # use pythonw.exe to avoid opening a console window
                        executable = alt

                from distutils.spawn import spawn

                # Spawn a clean interpreter (-E ignores environment
                # variables); if the marker appeared, .pth files work.
                spawn([executable, '-E', '-c', 'pass'], 0)

                if os.path.exists(ok_file):
                    log.info(
                        "TEST PASSED: %s appears to support .pth files",
                        instdir
                    )
                    return True
            finally:
                # Always clean up the probe files.
                if f:
                    f.close()
                if os.path.exists(ok_file):
                    os.unlink(ok_file)
                if os.path.exists(pth_file):
                    os.unlink(pth_file)
        if not self.multi_version:
            log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
        return False
+
    def install_egg_scripts(self, dist):
        """Write all the scripts for `dist`, unless scripts are excluded"""
        # Legacy scripts ship inside the egg's 'scripts' metadata dir;
        # install each file found there unless --exclude-scripts was given.
        if not self.exclude_scripts and dist.metadata_isdir('scripts'):
            for script_name in dist.metadata_listdir('scripts'):
                if dist.metadata_isdir('scripts/' + script_name):
                    # The "script" is a directory, likely a Python 3
                    # __pycache__ directory, so skip it.
                    continue
                self.install_script(
                    dist, script_name,
                    dist.get_metadata('scripts/' + script_name)
                )
        # Entry-point wrapper scripts are generated separately
        # (install_wrapper_scripts checks exclude_scripts itself).
        self.install_wrapper_scripts(dist)
+
+    def add_output(self, path):
+        if os.path.isdir(path):
+            for base, dirs, files in os.walk(path):
+                for filename in files:
+                    self.outputs.append(os.path.join(base, filename))
+        else:
+            self.outputs.append(path)
+
+    def not_editable(self, spec):
+        if self.editable:
+            raise DistutilsArgError(
+                "Invalid argument %r: you can't use filenames or URLs "
+                "with --editable (except via the --find-links option)."
+                % (spec,)
+            )
+
+    def check_editable(self, spec):
+        if not self.editable:
+            return
+
+        if os.path.exists(os.path.join(self.build_directory, spec.key)):
+            raise DistutilsArgError(
+                "%r already exists in %s; can't do a checkout there" %
+                (spec.key, self.build_directory)
+            )
+
    @contextlib.contextmanager
    def _tmpdir(self):
        """Yield a fresh temporary directory, removing it (if it still
        exists) when the managed block exits."""
        tmpdir = tempfile.mkdtemp(prefix=six.u("easy_install-"))
        try:
            # cast to str as workaround for #709 and #710 and #712
            yield str(tmpdir)
        finally:
            # short-circuit 'and' skips removal if the dir vanished;
            # rmtree_safe sanitizes the path before rmtree
            os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir))
+
    def easy_install(self, spec, deps=False):
        """Install `spec` (a Requirement, URL, or local path string).

        Returns the installed Distribution when one can be determined,
        or None.  Raises DistutilsError if no suitable distribution is
        found.
        """
        if not self.editable:
            self.install_site_py()

        with self._tmpdir() as tmpdir:
            if not isinstance(spec, Requirement):
                if URL_SCHEME(spec):
                    # It's a url, download it to tmpdir and process
                    self.not_editable(spec)
                    dl = self.package_index.download(spec, tmpdir)
                    return self.install_item(None, dl, tmpdir, deps, True)

                elif os.path.exists(spec):
                    # Existing file or directory, just process it directly
                    self.not_editable(spec)
                    return self.install_item(None, spec, tmpdir, deps, True)
                else:
                    # otherwise interpret the string as a requirement spec
                    spec = parse_requirement_arg(spec)

            self.check_editable(spec)
            dist = self.package_index.fetch_distribution(
                spec, tmpdir, self.upgrade, self.editable,
                not self.always_copy, self.local_index
            )
            if dist is None:
                msg = "Could not find suitable distribution for %r" % spec
                if self.always_copy:
                    msg += " (--always-copy skips system and development eggs)"
                raise DistutilsError(msg)
            elif dist.precedence == DEVELOP_DIST:
                # .egg-info dists don't need installing, just process deps
                self.process_distribution(spec, dist, deps, "Using")
                return dist
            else:
                return self.install_item(spec, dist.location, tmpdir, deps)
+
    def install_item(self, spec, download, tmpdir, deps, install_needed=False):
        """Install the file/directory at `download`, building it first
        unless it is already an installed local egg.

        Returns the Distribution matching `spec`, if any.
        """

        # Installation is also needed if file in tmpdir or is not an egg
        install_needed = install_needed or self.always_copy
        install_needed = install_needed or os.path.dirname(download) == tmpdir
        install_needed = install_needed or not download.endswith('.egg')
        install_needed = install_needed or (
            self.always_copy_from is not None and
            os.path.dirname(normalize_path(download)) ==
            normalize_path(self.always_copy_from)
        )

        if spec and not install_needed:
            # at this point, we know it's a local .egg, we just don't know if
            # it's already installed.
            for dist in self.local_index[spec.project_name]:
                if dist.location == download:
                    break
            else:
                install_needed = True  # it's not in the local index

        log.info("Processing %s", os.path.basename(download))

        if install_needed:
            dists = self.install_eggs(spec, download, tmpdir)
            for dist in dists:
                self.process_distribution(spec, dist, deps)
        else:
            dists = [self.egg_distribution(download)]
            self.process_distribution(spec, dists[0], deps, "Using")

        if spec is not None:
            # hand back whichever installed dist satisfies the requirement
            for dist in dists:
                if dist in spec:
                    return dist
+
+    def select_scheme(self, name):
+        """Sets the install directories by applying the install schemes."""
+        # it's the caller's problem if they supply a bad name!
+        scheme = INSTALL_SCHEMES[name]
+        for key in SCHEME_KEYS:
+            attrname = 'install_' + key
+            if getattr(self, attrname) is None:
+                setattr(self, attrname, scheme[key])
+
    def process_distribution(self, requirement, dist, deps=True, *info):
        """Register a newly obtained `dist`, write its scripts, and then
        resolve and install its dependencies (unless `deps` is false and
        --always-copy is off)."""
        self.update_pth(dist)
        self.package_index.add(dist)
        # replace any stale entry for the same dist in the local index
        if dist in self.local_index[dist.key]:
            self.local_index.remove(dist)
        self.local_index.add(dist)
        self.install_egg_scripts(dist)
        self.installed_projects[dist.key] = dist
        log.info(self.installation_report(requirement, dist, *info))
        if (dist.has_metadata('dependency_links.txt') and
                not self.no_find_links):
            self.package_index.add_find_links(
                dist.get_metadata_lines('dependency_links.txt')
            )
        if not deps and not self.always_copy:
            return
        elif requirement is not None and dist.key != requirement.key:
            log.warn("Skipping dependencies for %s", dist)
            return  # XXX this is not the distribution we were looking for
        elif requirement is None or dist not in requirement:
            # if we wound up with a different version, resolve what we've got
            distreq = dist.as_requirement()
            requirement = Requirement(str(distreq))
        log.info("Processing dependencies for %s", requirement)
        try:
            # resolve against the local index, installing anything missing
            # via self.easy_install
            distros = WorkingSet([]).resolve(
                [requirement], self.local_index, self.easy_install
            )
        except DistributionNotFound as e:
            raise DistutilsError(str(e))
        except VersionConflict as e:
            raise DistutilsError(e.report())
        if self.always_copy or self.always_copy_from:
            # Force all the relevant distros to be copied or activated
            for dist in distros:
                if dist.key not in self.installed_projects:
                    self.easy_install(dist.as_requirement())
        log.info("Finished processing dependencies for %s", requirement)
+
+    def should_unzip(self, dist):
+        if self.zip_ok is not None:
+            return not self.zip_ok
+        if dist.has_metadata('not-zip-safe'):
+            return True
+        if not dist.has_metadata('zip-safe'):
+            return True
+        return False
+
    def maybe_move(self, spec, dist_filename, setup_base):
        """Move the unpacked source for `spec` into --build-directory,
        returning the directory to use as the setup base.

        If the target already exists the build is left in `setup_base`
        and a warning is logged.
        """
        dst = os.path.join(self.build_directory, spec.key)
        if os.path.exists(dst):
            msg = (
                "%r already exists in %s; build directory %s will not be kept"
            )
            log.warn(msg, spec.key, self.build_directory, setup_base)
            return setup_base
        if os.path.isdir(dist_filename):
            setup_base = dist_filename
        else:
            if os.path.dirname(dist_filename) == setup_base:
                os.unlink(dist_filename)  # get it out of the tmp dir
            contents = os.listdir(setup_base)
            if len(contents) == 1:
                dist_filename = os.path.join(setup_base, contents[0])
                if os.path.isdir(dist_filename):
                    # if the only thing there is a directory, move it instead
                    setup_base = dist_filename
        ensure_directory(dst)
        shutil.move(setup_base, dst)
        return dst
+
+    def install_wrapper_scripts(self, dist):
+        if self.exclude_scripts:
+            return
+        for args in ScriptWriter.best().get_args(dist):
+            self.write_script(*args)
+
    def install_script(self, dist, script_name, script_text, dev_path=None):
        """Generate a legacy script wrapper and install it"""
        spec = str(dist.as_requirement())
        is_script = is_python_script(script_text, script_name)

        if is_script:
            # NOTE: the template is filled via %(name)s interpolation from
            # locals(), so local names such as `spec` are part of the
            # template contract -- do not rename them.
            body = self._load_template(dev_path) % locals()
            script_text = ScriptWriter.get_header(script_text) + body
        # write in binary mode ('b') to preserve the exact script bytes
        self.write_script(script_name, _to_ascii(script_text), 'b')
+
+    @staticmethod
+    def _load_template(dev_path):
+        """
+        There are a couple of template scripts in the package. This
+        function loads one of them and prepares it for use.
+        """
+        # See https://github.com/pypa/setuptools/issues/134 for info
+        # on script file naming and downstream issues with SVR4
+        name = 'script.tmpl'
+        if dev_path:
+            name = name.replace('.tmpl', ' (dev).tmpl')
+
+        raw_bytes = resource_string('setuptools', name)
+        return raw_bytes.decode('utf-8')
+
    def write_script(self, script_name, contents, mode="t", blockers=()):
        """Write an executable file to the scripts directory.

        `mode` is appended to 'w' when opening the file (pass 'b' for
        binary contents); any `blockers` (stale companion files) in the
        scripts directory are deleted first.
        """
        self.delete_blockers(  # clean up old .py/.pyw w/o a script
            [os.path.join(self.script_dir, x) for x in blockers]
        )
        log.info("Installing %s script to %s", script_name, self.script_dir)
        target = os.path.join(self.script_dir, script_name)
        self.add_output(target)

        if self.dry_run:
            return

        mask = current_umask()
        ensure_directory(target)
        # remove any pre-existing file so old permissions don't linger
        if os.path.exists(target):
            os.unlink(target)
        with open(target, "w" + mode) as f:
            f.write(contents)
        # mark executable for everyone, minus the current umask
        chmod(target, 0o777 - mask)
+
    def install_eggs(self, spec, dist_filename, tmpdir):
        """Install `dist_filename`, which may be an egg, a wininst .exe,
        a wheel, an archive, or a source directory.

        Returns a list of installed Distributions (empty in --editable
        mode, where the source is only extracted).
        """
        # .egg dirs or files are already built, so just return them
        if dist_filename.lower().endswith('.egg'):
            return [self.install_egg(dist_filename, tmpdir)]
        elif dist_filename.lower().endswith('.exe'):
            return [self.install_exe(dist_filename, tmpdir)]
        elif dist_filename.lower().endswith('.whl'):
            return [self.install_wheel(dist_filename, tmpdir)]

        # Anything else, try to extract and build
        setup_base = tmpdir
        if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
            unpack_archive(dist_filename, tmpdir, self.unpack_progress)
        elif os.path.isdir(dist_filename):
            setup_base = os.path.abspath(dist_filename)

        if (setup_base.startswith(tmpdir)  # something we downloaded
                and self.build_directory and spec is not None):
            setup_base = self.maybe_move(spec, dist_filename, setup_base)

        # Find the setup.py file
        setup_script = os.path.join(setup_base, 'setup.py')

        if not os.path.exists(setup_script):
            # fall back to a single nested directory containing setup.py
            setups = glob(os.path.join(setup_base, '*', 'setup.py'))
            if not setups:
                raise DistutilsError(
                    "Couldn't find a setup script in %s" %
                    os.path.abspath(dist_filename)
                )
            if len(setups) > 1:
                raise DistutilsError(
                    "Multiple setup scripts in %s" %
                    os.path.abspath(dist_filename)
                )
            setup_script = setups[0]

        # Now run it, and return the result
        if self.editable:
            log.info(self.report_editable(spec, setup_script))
            return []
        else:
            return self.build_and_install(setup_script, setup_base)
+
+    def egg_distribution(self, egg_path):
+        if os.path.isdir(egg_path):
+            metadata = PathMetadata(egg_path, os.path.join(egg_path,
+                                                           'EGG-INFO'))
+        else:
+            metadata = EggMetadata(zipimport.zipimporter(egg_path))
+        return Distribution.from_filename(egg_path, metadata=metadata)
+
    def install_egg(self, egg_path, tmpdir):
        """Move/copy/extract the egg at `egg_path` into the install dir
        and return the resulting Distribution."""
        destination = os.path.join(
            self.install_dir,
            os.path.basename(egg_path),
        )
        destination = os.path.abspath(destination)
        if not self.dry_run:
            ensure_directory(destination)

        dist = self.egg_distribution(egg_path)
        if not samefile(egg_path, destination):
            # clear out anything already occupying the destination
            if os.path.isdir(destination) and not os.path.islink(destination):
                dir_util.remove_tree(destination, dry_run=self.dry_run)
            elif os.path.exists(destination):
                self.execute(
                    os.unlink,
                    (destination,),
                    "Removing " + destination,
                )
            try:
                new_dist_is_zipped = False
                # pick the transfer operation: move when the egg came from
                # our tmpdir, copy otherwise, or extract when it must be
                # installed unzipped
                if os.path.isdir(egg_path):
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copytree, "Copying"
                elif self.should_unzip(dist):
                    self.mkpath(destination)
                    f, m = self.unpack_and_compile, "Extracting"
                else:
                    new_dist_is_zipped = True
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copy2, "Copying"
                self.execute(
                    f,
                    (egg_path, destination),
                    (m + " %s to %s") % (
                        os.path.basename(egg_path),
                        os.path.dirname(destination)
                    ),
                )
                update_dist_caches(
                    destination,
                    fix_zipimporter_caches=new_dist_is_zipped,
                )
            except Exception:
                # still invalidate caches for whatever landed at destination
                update_dist_caches(destination, fix_zipimporter_caches=False)
                raise

        self.add_output(destination)
        return self.egg_distribution(destination)
+
    def install_exe(self, dist_filename, tmpdir):
        """Convert a bdist_wininst .exe installer into an egg and install
        it; returns the installed Distribution."""
        # See if it's valid, get data
        cfg = extract_wininst_cfg(dist_filename)
        if cfg is None:
            raise DistutilsError(
                "%s is not a valid distutils Windows .exe" % dist_filename
            )
        # Create a dummy distribution object until we build the real distro
        dist = Distribution(
            None,
            project_name=cfg.get('metadata', 'name'),
            version=cfg.get('metadata', 'version'), platform=get_platform(),
        )

        # Convert the .exe to an unpacked egg
        egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
        dist.location = egg_path
        egg_tmp = egg_path + '.tmp'
        _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
        pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
        ensure_directory(pkg_inf)  # make sure EGG-INFO dir exists
        dist._provider = PathMetadata(egg_tmp, _egg_info)  # XXX
        self.exe_to_egg(dist_filename, egg_tmp)

        # Write EGG-INFO/PKG-INFO
        if not os.path.exists(pkg_inf):
            f = open(pkg_inf, 'w')
            f.write('Metadata-Version: 1.0\n')
            for k, v in cfg.items('metadata'):
                if k != 'target_version':
                    f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
            f.close()
        script_dir = os.path.join(_egg_info, 'scripts')
        # delete entry-point scripts to avoid duping
        self.delete_blockers([
            os.path.join(script_dir, args[0])
            for args in ScriptWriter.get_args(dist)
        ])
        # Build .egg file from tmpdir
        bdist_egg.make_zipfile(
            egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
        )
        # install the .egg
        return self.install_egg(egg_path, tmpdir)
+
    def exe_to_egg(self, dist_filename, egg_tmp):
        """Extract a bdist_wininst to the directories an egg would use"""
        # Check for .pth file and set up prefix translations
        prefixes = get_exe_prefixes(dist_filename)
        to_compile = []
        native_libs = []
        top_level = {}

        def process(src, dst):
            # unpack_archive filter: remap each member's path through the
            # wininst prefix table, and track modules/libs along the way
            s = src.lower()
            for old, new in prefixes:
                if s.startswith(old):
                    src = new + src[len(old):]
                    parts = src.split('/')
                    dst = os.path.join(egg_tmp, *parts)
                    dl = dst.lower()
                    if dl.endswith('.pyd') or dl.endswith('.dll'):
                        parts[-1] = bdist_egg.strip_module(parts[-1])
                        top_level[os.path.splitext(parts[0])[0]] = 1
                        native_libs.append(src)
                    elif dl.endswith('.py') and old != 'SCRIPTS/':
                        top_level[os.path.splitext(parts[0])[0]] = 1
                        to_compile.append(dst)
                    return dst
            if not src.endswith('.pth'):
                log.warn("WARNING: can't process %s", src)
            return None

        # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
        unpack_archive(dist_filename, egg_tmp, process)
        stubs = []
        for res in native_libs:
            if res.lower().endswith('.pyd'):  # create stubs for .pyd's
                parts = res.split('/')
                resource = parts[-1]
                parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
                pyfile = os.path.join(egg_tmp, *parts)
                to_compile.append(pyfile)
                stubs.append(pyfile)
                bdist_egg.write_stub(resource, pyfile)
        self.byte_compile(to_compile)  # compile .py's
        bdist_egg.write_safety_flag(
            os.path.join(egg_tmp, 'EGG-INFO'),
            bdist_egg.analyze_egg(egg_tmp, stubs))  # write zip-safety flag

        # NOTE: the strings below are looked up via locals(), so the local
        # names `top_level` and `native_libs` must not be renamed
        for name in 'top_level', 'native_libs':
            if locals()[name]:
                txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
                if not os.path.exists(txt):
                    f = open(txt, 'w')
                    f.write('\n'.join(locals()[name]) + '\n')
                    f.close()
+
    def install_wheel(self, wheel_path, tmpdir):
        """Install the wheel at `wheel_path` by converting it to an egg
        directory in the install dir; returns the Distribution."""
        wheel = Wheel(wheel_path)
        # incompatible wheels should have been filtered out upstream
        assert wheel.is_compatible()
        destination = os.path.join(self.install_dir, wheel.egg_name())
        destination = os.path.abspath(destination)
        if not self.dry_run:
            ensure_directory(destination)
        # clear out anything already occupying the destination
        if os.path.isdir(destination) and not os.path.islink(destination):
            dir_util.remove_tree(destination, dry_run=self.dry_run)
        elif os.path.exists(destination):
            self.execute(
                os.unlink,
                (destination,),
                "Removing " + destination,
            )
        try:
            self.execute(
                wheel.install_as_egg,
                (destination,),
                ("Installing %s to %s") % (
                    os.path.basename(wheel_path),
                    os.path.dirname(destination)
                ),
            )
        finally:
            update_dist_caches(destination, fix_zipimporter_caches=False)
        self.add_output(destination)
        return self.egg_distribution(destination)
+
    # Shown after a --multi-version install: explains how to select a
    # version at runtime via pkg_resources.require().  Filled with
    # %(name)s / %(version)s in installation_report().
    __mv_warning = textwrap.dedent("""
        Because this distribution was installed --multi-version, before you can
        import modules from this package in an application, you will need to
        'import pkg_resources' and then use a 'require()' call similar to one of
        these examples, in order to select the desired version:

            pkg_resources.require("%(name)s")  # latest installed version
            pkg_resources.require("%(name)s==%(version)s")  # this exact version
            pkg_resources.require("%(name)s>=%(version)s")  # this version or higher
        """).lstrip()

    # Appended to the report when the install dir is not on sys.path.
    __id_warning = textwrap.dedent("""
        Note also that the installation directory must be on sys.path at runtime for
        this to work.  (e.g. by being the application's script directory, by being on
        PYTHONPATH, or by being added to sys.path by your code.)
        """)
+
    def installation_report(self, req, dist, what="Installed"):
        """Helpful installation message for display to package users"""
        # msg is filled via %(name)s interpolation from locals() below;
        # the locals what/eggloc/name/version/extras are part of the
        # template contract -- do not rename them.
        msg = "\n%(what)s %(eggloc)s%(extras)s"
        if self.multi_version and not self.no_report:
            msg += '\n' + self.__mv_warning
            if self.install_dir not in map(normalize_path, sys.path):
                msg += '\n' + self.__id_warning

        eggloc = dist.location
        name = dist.project_name
        version = dist.version
        extras = ''  # TODO: self.report_extras(req, dist)
        return msg % locals()
+
    # Template used by report_editable(); filled from its locals()
    # (spec/dirname/python).
    __editable_msg = textwrap.dedent("""
        Extracted editable version of %(spec)s to %(dirname)s

        If it uses setuptools in its setup script, you can activate it in
        "development" mode by going to that directory and running::

            %(python)s setup.py develop

        See the setuptools documentation for the "develop" command for more info.
        """).lstrip()
+
    def report_editable(self, spec, setup_script):
        """Return the message describing where an editable checkout of
        `spec` was extracted."""
        # locals spec/dirname/python feed the %(name)s template below;
        # do not rename them
        dirname = os.path.dirname(setup_script)
        python = sys.executable
        return '\n' + self.__editable_msg % locals()
+
    def run_setup(self, setup_script, setup_base, args):
        """Run `setup_script` with `args`, propagating our verbosity and
        dry-run settings; a SystemExit from the script is converted into
        a DistutilsError."""
        # make setuptools' patched commands visible to the child setup
        sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
        sys.modules.setdefault('distutils.command.egg_info', egg_info)

        args = list(args)
        if self.verbose > 2:
            v = 'v' * (self.verbose - 1)
            args.insert(0, '-' + v)
        elif self.verbose < 2:
            args.insert(0, '-q')
        if self.dry_run:
            args.insert(0, '-n')
        log.info(
            "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
        )
        try:
            run_setup(setup_script, args)
        except SystemExit as v:
            raise DistutilsError("Setup script exited with %s" % (v.args[0],))
+
    def build_and_install(self, setup_script, setup_base):
        """Run bdist_egg for the source at `setup_base` and install every
        egg it produces; returns the list of installed Distributions."""
        args = ['bdist_egg', '--dist-dir']

        # build into a temp dir next to the setup script
        dist_dir = tempfile.mkdtemp(
            prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
        )
        try:
            # pass our fetcher options down to the child build
            self._set_fetcher_options(os.path.dirname(setup_script))
            args.append(dist_dir)

            self.run_setup(setup_script, setup_base, args)
            all_eggs = Environment([dist_dir])
            eggs = []
            for key in all_eggs:
                for dist in all_eggs[key]:
                    eggs.append(self.install_egg(dist.location, setup_base))
            if not eggs and not self.dry_run:
                log.warn("No eggs found in %s (setup script problem?)",
                         dist_dir)
            return eggs
        finally:
            rmtree(dist_dir)
            log.set_verbosity(self.verbose)  # restore our log verbosity
+
+    def _set_fetcher_options(self, base):
+        """
+        When easy_install is about to run bdist_egg on a source dist, that
+        source dist might have 'setup_requires' directives, requiring
+        additional fetching. Ensure the fetcher options given to easy_install
+        are available to that command as well.
+        """
+        # find the fetch options from easy_install and write them out
+        # to the setup.cfg file.
+        ei_opts = self.distribution.get_option_dict('easy_install').copy()
+        fetch_directives = (
+            'find_links', 'site_dirs', 'index_url', 'optimize',
+            'site_dirs', 'allow_hosts',
+        )
+        fetch_options = {}
+        for key, val in ei_opts.items():
+            if key not in fetch_directives:
+                continue
+            fetch_options[key.replace('_', '-')] = val[1]
+        # create a settings dictionary suitable for `edit_config`
+        settings = dict(easy_install=fetch_options)
+        cfg_filename = os.path.join(base, 'setup.cfg')
+        setopt.edit_config(cfg_filename, settings)
+
+    def update_pth(self, dist):
+        if self.pth_file is None:
+            return
+
+        for d in self.pth_file[dist.key]:  # drop old entries
+            if self.multi_version or d.location != dist.location:
+                log.info("Removing %s from easy-install.pth file", d)
+                self.pth_file.remove(d)
+                if d.location in self.shadow_path:
+                    self.shadow_path.remove(d.location)
+
+        if not self.multi_version:
+            if dist.location in self.pth_file.paths:
+                log.info(
+                    "%s is already the active version in easy-install.pth",
+                    dist,
+                )
+            else:
+                log.info("Adding %s to easy-install.pth file", dist)
+                self.pth_file.add(dist)  # add new entry
+                if dist.location not in self.shadow_path:
+                    self.shadow_path.append(dist.location)
+
+        if not self.dry_run:
+
+            self.pth_file.save()
+
+            if dist.key == 'setuptools':
+                # Ensure that setuptools itself never becomes unavailable!
+                # XXX should this check for latest version?
+                filename = os.path.join(self.install_dir, 'setuptools.pth')
+                if os.path.islink(filename):
+                    os.unlink(filename)
+                f = open(filename, 'wt')
+                f.write(self.pth_file.make_relative(dist.location) + '\n')
+                f.close()
+
    def unpack_progress(self, src, dst):
        """Progress filter for unpacking: log each member and keep it."""
        log.debug("Unpacking %s to %s", src, dst)
        return dst  # only unpack-and-compile skips files for dry run
+
    def unpack_and_compile(self, egg_path, destination):
        """Extract a zipped egg to `destination`, byte-compiling its .py
        files and adding execute permission to shared libraries."""
        to_compile = []
        to_chmod = []

        def pf(src, dst):
            # unpack filter: collect files to post-process, and skip
            # writing anything when dry-run is on (returns None)
            if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
                to_compile.append(dst)
            elif dst.endswith('.dll') or dst.endswith('.so'):
                to_chmod.append(dst)
            self.unpack_progress(src, dst)
            return not self.dry_run and dst or None

        unpack_archive(egg_path, destination, pf)
        self.byte_compile(to_compile)
        if not self.dry_run:
            for f in to_chmod:
                # add read/execute bits while masking group/other write
                mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
                chmod(f, mode)
+
    def byte_compile(self, to_compile):
        """Byte-compile the given .py files, honoring --optimize; a no-op
        when bytecode writing is disabled for the interpreter."""
        if sys.dont_write_bytecode:
            return

        from distutils.util import byte_compile

        try:
            # try to make the byte compile messages quieter
            log.set_verbosity(self.verbose - 1)

            byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
            if self.optimize:
                # second pass produces optimized bytecode as well
                byte_compile(
                    to_compile, optimize=self.optimize, force=1,
                    dry_run=self.dry_run,
                )
        finally:
            log.set_verbosity(self.verbose)  # restore original verbosity
+
    # Template for no_default_version_msg(): explains what to do when the
    # chosen install directory cannot process .pth files.  Filled with
    # (install_dir, PYTHONPATH) positional args.
    __no_default_msg = textwrap.dedent("""
        bad install directory or PYTHONPATH

        You are attempting to install a package to a directory that is not
        on PYTHONPATH and which Python does not read ".pth" files from.  The
        installation directory you specified (via --install-dir, --prefix, or
        the distutils default setting) was:

            %s

        and your PYTHONPATH environment variable currently contains:

            %r

        Here are some of your options for correcting the problem:

        * You can choose a different installation directory, i.e., one that is
          on PYTHONPATH or supports .pth files

        * You can add the installation directory to the PYTHONPATH environment
          variable.  (It must then also be on PYTHONPATH whenever you run
          Python and want to use the package(s) you are installing.)

        * You can set up the installation directory to support ".pth" files by
          using one of the approaches described here:

          https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations


        Please make the appropriate changes for your system and try again.""").lstrip()
+
    def no_default_version_msg(self):
        """Return the formatted 'bad install directory' error message."""
        template = self.__no_default_msg
        return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
+
    def install_site_py(self):
        """Make sure there's a site.py in the target dir, if needed"""

        if self.sitepy_installed:
            return  # already did it, or don't need to

        sitepy = os.path.join(self.install_dir, "site.py")
        source = resource_string("setuptools", "site-patch.py")
        source = source.decode('utf-8')
        current = ""

        if os.path.exists(sitepy):
            log.debug("Checking existing site.py in %s", self.install_dir)
            with io.open(sitepy) as strm:
                current = strm.read()

            # refuse to clobber a site.py we didn't generate ourselves
            if not current.startswith('def __boot():'):
                raise DistutilsError(
                    "%s is not a setuptools-generated site.py; please"
                    " remove it." % sitepy
                )

        # (re)write only when the content actually differs
        if current != source:
            log.info("Creating %s", sitepy)
            if not self.dry_run:
                ensure_directory(sitepy)
                with io.open(sitepy, 'w', encoding='utf-8') as strm:
                    strm.write(source)
            self.byte_compile([sitepy])

        self.sitepy_installed = True
+
    def create_home_path(self):
        """Create directories under ~."""
        # only relevant for --user installs
        if not self.user:
            return
        home = convert_path(os.path.expanduser("~"))
        for name, path in six.iteritems(self.config_vars):
            # only create configured paths that live inside the user's home
            if path.startswith(home) and not os.path.isdir(path):
                self.debug_print("os.makedirs('%s', 0o700)" % path)
                # 0o700: user-private permissions
                os.makedirs(path, 0o700)
+
    # Per-os.name install scheme applied by _expand() when --prefix is
    # given; platforms not listed fall back to DEFAULT_SCHEME.
    INSTALL_SCHEMES = dict(
        posix=dict(
            install_dir='$base/lib/python$py_version_short/site-packages',
            script_dir='$base/bin',
        ),
    )

    # Windows-style layout used when os.name has no entry above.
    DEFAULT_SCHEME = dict(
        install_dir='$base/Lib/site-packages',
        script_dir='$base/Scripts',
    )
+
    def _expand(self, *attrs):
        """Substitute $-variables into the named attributes.

        When --prefix is given, first fill any unset scheme attributes
        (install_dir/script_dir) from the per-OS INSTALL_SCHEMES, using
        the prefix as '$base'.
        """
        config_vars = self.get_finalized_command('install').config_vars

        if self.prefix:
            # Set default install_dir/scripts from --prefix
            config_vars = config_vars.copy()
            config_vars['base'] = self.prefix
            scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
            for attr, val in scheme.items():
                if getattr(self, attr, None) is None:
                    setattr(self, attr, val)

        from distutils.util import subst_vars

        for attr in attrs:
            val = getattr(self, attr)
            if val is not None:
                val = subst_vars(val, config_vars)
                if os.name == 'posix':
                    # allow '~' in configured paths on posix
                    val = os.path.expanduser(val)
                setattr(self, attr, val)
+
+
+def _pythonpath():
+    items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
+    return filter(None, items)
+
+
def get_site_dirs():
    """
    Return a list of 'site' dirs
    """

    sitedirs = []

    # start with PYTHONPATH
    sitedirs.extend(_pythonpath())

    prefixes = [sys.prefix]
    if sys.exec_prefix != sys.prefix:
        prefixes.append(sys.exec_prefix)
    for prefix in prefixes:
        if prefix:
            if sys.platform in ('os2emx', 'riscos'):
                sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
            elif os.sep == '/':
                # POSIX layout: lib/pythonX.Y/site-packages plus the
                # legacy lib/site-python directory
                sitedirs.extend([
                    os.path.join(
                        prefix,
                        "lib",
                        "python" + sys.version[:3],
                        "site-packages",
                    ),
                    os.path.join(prefix, "lib", "site-python"),
                ])
            else:
                # Windows layout: the prefix itself plus lib/site-packages
                sitedirs.extend([
                    prefix,
                    os.path.join(prefix, "lib", "site-packages"),
                ])
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        home_sp = os.path.join(
                            home,
                            'Library',
                            'Python',
                            sys.version[:3],
                            'site-packages',
                        )
                        sitedirs.append(home_sp)
    # also include the sysconfig-reported pure/platform library paths
    lib_paths = get_path('purelib'), get_path('platlib')
    for site_lib in lib_paths:
        if site_lib not in sitedirs:
            sitedirs.append(site_lib)

    if site.ENABLE_USER_SITE:
        sitedirs.append(site.USER_SITE)

    try:
        sitedirs.extend(site.getsitepackages())
    except AttributeError:
        # site.getsitepackages may be absent in some environments; tolerate
        pass

    # normalize every entry (case/sep) so callers can compare paths directly
    sitedirs = list(map(normalize_path, sitedirs))

    return sitedirs
+
+
def expand_paths(inputs):
    """Yield sys.path directories that might contain "old-style" packages

    For each unique, existing directory in `inputs`, yield ``(dirname,
    files)``; then, for every foreign ``.pth`` file found there, also yield
    the existing, not-yet-seen directories it references.
    """

    seen = {}

    for dirname in inputs:
        dirname = normalize_path(dirname)
        if dirname in seen:
            continue

        seen[dirname] = 1
        if not os.path.isdir(dirname):
            continue

        files = os.listdir(dirname)
        yield dirname, files

        for name in files:
            if not name.endswith('.pth'):
                # We only care about the .pth files
                continue
            if name in ('easy-install.pth', 'setuptools.pth'):
                # Ignore .pth files that we control
                continue

            # Read the .pth file.  Use a context manager so the handle is
            # closed even if yield_lines raises (the previous code used a
            # bare open()/close() pair and leaked the handle on error).
            with open(os.path.join(dirname, name)) as f:
                lines = list(yield_lines(f))

            # Yield existing non-dupe, non-import directory lines from it
            for line in lines:
                if line.startswith("import"):
                    continue
                line = normalize_path(line.rstrip())
                if line in seen:
                    continue
                seen[line] = 1
                if not os.path.isdir(line):
                    continue
                yield line, os.listdir(line)
+
+
def extract_wininst_cfg(dist_filename):
    """Extract configuration data from a bdist_wininst .exe

    Returns a configparser.RawConfigParser, or None
    """
    f = open(dist_filename, 'rb')
    try:
        endrec = zipfile._EndRecData(f)
        if endrec is None:
            return None

        # size of the data prepended before the zip archive proper; the
        # wininst metadata (tag + config + bitmap sizes) sits at its end
        prepended = (endrec[9] - endrec[5]) - endrec[6]
        if prepended < 12:  # no wininst data here
            return None
        f.seek(prepended - 12)

        tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
        if tag not in (0x1234567A, 0x1234567B):
            return None  # not a valid tag

        # the config text precedes the 12-byte trailer just read
        f.seek(prepended - (12 + cfglen))
        init = {'version': '', 'target_version': ''}
        cfg = configparser.RawConfigParser(init)
        try:
            part = f.read(cfglen)
            # Read up to the first null byte.
            config = part.split(b'\0', 1)[0]
            # Now the config is in bytes, but for RawConfigParser, it should
            #  be text, so decode it.
            config = config.decode(sys.getfilesystemencoding())
            cfg.readfp(six.StringIO(config))
        except configparser.Error:
            return None
        if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
            return None
        return cfg

    finally:
        f.close()
+
+
def get_exe_prefixes(exe_filename):
    """Get exe->egg path translations for a given .exe file"""

    # (archive prefix, replacement) pairs mapping wininst layout onto the
    # egg layout; longest-match wins after the sort/reverse below
    prefixes = [
        ('PURELIB/', ''),
        ('PLATLIB/pywin32_system32', ''),
        ('PLATLIB/', ''),
        ('SCRIPTS/', 'EGG-INFO/scripts/'),
        ('DATA/lib/site-packages', ''),
    ]
    z = zipfile.ZipFile(exe_filename)
    try:
        for info in z.infolist():
            name = info.filename
            parts = name.split('/')
            # a */<pkg>.egg-info/PKG-INFO entry maps that dir to EGG-INFO/
            if len(parts) == 3 and parts[2] == 'PKG-INFO':
                if parts[1].endswith('.egg-info'):
                    prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
                    break
            if len(parts) != 2 or not name.endswith('.pth'):
                continue
            if name.endswith('-nspkg.pth'):
                continue
            # top-level .pth files add extra path prefixes to strip
            if parts[0].upper() in ('PURELIB', 'PLATLIB'):
                contents = z.read(name)
                if six.PY3:
                    contents = contents.decode()
                for pth in yield_lines(contents):
                    pth = pth.strip().replace('\\', '/')
                    if not pth.startswith('import'):
                        prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
    finally:
        z.close()
    # case-insensitive, longest-prefix-first matching order
    prefixes = [(x.lower(), y) for x, y in prefixes]
    prefixes.sort()
    prefixes.reverse()
    return prefixes
+
+
class PthDistributions(Environment):
    """A .pth file with Distribution paths in it"""

    # True whenever the in-memory path list diverges from the file on disk
    dirty = False

    def __init__(self, filename, sitedirs=()):
        # filename: the .pth file to manage; sitedirs: dirs whose entries
        # should never be added to the file (they are already on sys.path)
        self.filename = filename
        self.sitedirs = list(map(normalize_path, sitedirs))
        self.basedir = normalize_path(os.path.dirname(self.filename))
        self._load()
        Environment.__init__(self, [], None, None)
        for path in yield_lines(self.paths):
            list(map(self.add, find_distributions(path, True)))

    def _load(self):
        """Parse the .pth file into self.paths, pruning stale entries."""
        self.paths = []
        saw_import = False
        seen = dict.fromkeys(self.sitedirs)
        if os.path.isfile(self.filename):
            f = open(self.filename, 'rt')
            for line in f:
                if line.startswith('import'):
                    saw_import = True
                    continue
                path = line.rstrip()
                self.paths.append(path)
                # keep blank/comment lines verbatim, but don't process them
                if not path.strip() or path.strip().startswith('#'):
                    continue
                # skip non-existent paths, in case somebody deleted a package
                # manually, and duplicate paths as well
                path = self.paths[-1] = normalize_path(
                    os.path.join(self.basedir, path)
                )
                if not os.path.exists(path) or path in seen:
                    self.paths.pop()  # skip it
                    self.dirty = True  # we cleaned up, so we're dirty now :)
                    continue
                seen[path] = 1
            f.close()

        if self.paths and not saw_import:
            self.dirty = True  # ensure anything we touch has import wrappers
        # drop trailing blank lines
        while self.paths and not self.paths[-1].strip():
            self.paths.pop()

    def save(self):
        """Write changed .pth file back to disk"""
        if not self.dirty:
            return

        rel_paths = list(map(self.make_relative, self.paths))
        if rel_paths:
            log.debug("Saving %s", self.filename)
            lines = self._wrap_lines(rel_paths)
            data = '\n'.join(lines) + '\n'

            # replace a symlink with a real file rather than writing through
            if os.path.islink(self.filename):
                os.unlink(self.filename)
            with open(self.filename, 'wt') as f:
                f.write(data)

        elif os.path.exists(self.filename):
            log.debug("Deleting empty %s", self.filename)
            os.unlink(self.filename)

        self.dirty = False

    @staticmethod
    def _wrap_lines(lines):
        # no-op here; RewritePthDistributions adds prelude/postlude lines
        return lines

    def add(self, dist):
        """Add `dist` to the distribution map"""
        new_path = (
            dist.location not in self.paths and (
                dist.location not in self.sitedirs or
                # account for '.' being in PYTHONPATH
                dist.location == os.getcwd()
            )
        )
        if new_path:
            self.paths.append(dist.location)
            self.dirty = True
        Environment.add(self, dist)

    def remove(self, dist):
        """Remove `dist` from the distribution map"""
        while dist.location in self.paths:
            self.paths.remove(dist.location)
            self.dirty = True
        Environment.remove(self, dist)

    def make_relative(self, path):
        """Return `path` relative to basedir if it lies under it, else as-is."""
        npath, last = os.path.split(normalize_path(path))
        baselen = len(self.basedir)
        parts = [last]
        sep = os.altsep == '/' and '/' or os.sep
        while len(npath) >= baselen:
            if npath == self.basedir:
                parts.append(os.curdir)
                parts.reverse()
                return sep.join(parts)
            npath, last = os.path.split(npath)
            parts.append(last)
        else:
            # loop exhausted without reaching basedir: path is outside it
            return path
+
+
class RewritePthDistributions(PthDistributions):
    """PthDistributions variant that brackets the path entries with code
    that moves the added entries to a fixed position in sys.path."""

    @classmethod
    def _wrap_lines(cls, lines):
        yield cls.prelude
        for line in lines:
            yield line
        yield cls.postlude

    # records len(sys.path) before the .pth entries are appended
    prelude = _one_liner("""
        import sys
        sys.__plen = len(sys.path)
        """)
    # moves the appended entries to the __egginsert position
    postlude = _one_liner("""
        import sys
        new = sys.path[sys.__plen:]
        del sys.path[sys.__plen:]
        p = getattr(sys, '__egginsert', 0)
        sys.path[p:p] = new
        sys.__egginsert = p + len(new)
        """)
+
+
# Opt-in switch: SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite selects the variant
# whose .pth prelude/postlude re-positions the added entries in sys.path.
if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
    PthDistributions = RewritePthDistributions
+
+
def _first_line_re():
    """
    Return a regular expression based on first_line_re suitable for matching
    strings.
    """
    pattern = first_line_re.pattern
    if isinstance(pattern, str):
        return first_line_re
    # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern; build
    # an equivalent text pattern so it can match (decoded) script text.
    return re.compile(pattern.decode())
+
+
def auto_chmod(func, arg, exc):
    """``shutil.rmtree`` onerror handler.

    On Windows, read-only files make unlink fail; make the file writable and
    retry.  In every other case, re-raise the active exception with the
    failing function and path appended for context.
    """
    if func in [os.unlink, os.remove] and os.name == 'nt':
        chmod(arg, stat.S_IWRITE)
        return func(arg)
    et, ev, tb = sys.exc_info()
    # Build a new exception of the same type with the extra context.  The
    # previous code indexed the exception object (``ev[0]``, ``ev[1]``),
    # which raises TypeError on Python 3 where exceptions are not
    # subscriptable.
    six.reraise(et, et("%s: %s %s" % (ev, func, arg)), tb)
+
+
def update_dist_caches(dist_path, fix_zipimporter_caches):
    """
    Fix any globally cached `dist_path` related data

    `dist_path` should be a path of a newly installed egg distribution (zipped
    or unzipped).

    sys.path_importer_cache contains finder objects that have been cached when
    importing data from the original distribution. Any such finders need to be
    cleared since the replacement distribution might be packaged differently,
    e.g. a zipped egg distribution might get replaced with an unzipped egg
    folder or vice versa. Having the old finders cached may then cause Python
    to attempt loading modules from the replacement distribution using an
    incorrect loader.

    zipimport.zipimporter objects are Python loaders charged with importing
    data packaged inside zip archives. If stale loaders referencing the
    original distribution, are left behind, they can fail to load modules from
    the replacement distribution. E.g. if an old zipimport.zipimporter instance
    is used to load data from a new zipped egg archive, it may cause the
    operation to attempt to locate the requested data in the wrong location -
    one indicated by the original distribution's zip archive directory
    information. Such an operation may then fail outright, e.g. report having
    read a 'bad local file header', or even worse, it may fail silently &
    return invalid data.

    zipimport._zip_directory_cache contains cached zip archive directory
    information for all existing zipimport.zipimporter instances and all such
    instances connected to the same archive share the same cached directory
    information.

    If asked, and the underlying Python implementation allows it, we can fix
    all existing zipimport.zipimporter instances instead of having to track
    them down and remove them one by one, by updating their shared cached zip
    archive directory information. This, of course, assumes that the
    replacement distribution is packaged as a zipped egg.

    If not asked to fix existing zipimport.zipimporter instances, we still do
    our best to clear any remaining zipimport.zipimporter related cached data
    that might somehow later get used when attempting to load data from the new
    distribution and thus cause such load operations to fail. Note that when
    tracking down such remaining stale data, we can not catch every conceivable
    usage from here, and we clear only those that we know of and have found to
    cause problems if left alive. Any remaining caches should be updated by
    whomever is in charge of maintaining them, i.e. they should be ready to
    handle us replacing their zip archives with new distributions at runtime.

    """
    # There are several other known sources of stale zipimport.zipimporter
    # instances that we do not clear here, but might if ever given a reason to
    # do so:
    # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
    #   set') may contain distributions which may in turn contain their
    #   zipimport.zipimporter loaders.
    # * Several zipimport.zipimporter loaders held by local variables further
    #   up the function call stack when running the setuptools installation.
    # * Already loaded modules may have their __loader__ attribute set to the
    #   exact loader instance used when importing them. Python 3.4 docs state
    #   that this information is intended mostly for introspection and so is
    #   not expected to cause us problems.
    normalized_path = normalize_path(dist_path)
    _uncache(normalized_path, sys.path_importer_cache)
    if fix_zipimporter_caches:
        _replace_zip_directory_cache_data(normalized_path)
    else:
        # Here, even though we do not want to fix existing and now stale
        # zipimporter cache information, we still want to remove it. Related to
        # Python's zip archive directory information cache, we clear each of
        # its stale entries in two phases:
        #   1. Clear the entry so attempting to access zip archive information
        #      via any existing stale zipimport.zipimporter instances fails.
        #   2. Remove the entry from the cache so any newly constructed
        #      zipimport.zipimporter instances do not end up using old stale
        #      zip archive directory information.
        # This whole stale data removal step does not seem strictly necessary,
        # but has been left in because it was done before we started replacing
        # the zip archive directory information cache content if possible, and
        # there are no relevant unit tests that we can depend on to tell us if
        # this is really needed.
        _remove_and_clear_zip_directory_cache_data(normalized_path)
+
+
def _collect_zipimporter_cache_entries(normalized_path, cache):
    """
    Return zipimporter cache entry keys related to a given normalized path.

    Alternative path spellings (e.g. those using different character case or
    those using alternative path separators) related to the same path are
    included. Any sub-path entries are included as well, i.e. those
    corresponding to zip archives embedded in other zip archives.

    """
    matches = []
    prefix_len = len(normalized_path)
    for key in cache:
        normalized_key = normalize_path(key)
        if not normalized_key.startswith(normalized_path):
            continue
        # Accept an exact match ('') or a sub-path (next char is a separator).
        if normalized_key[prefix_len:prefix_len + 1] in (os.sep, ''):
            matches.append(key)
    return matches
+
+
def _update_zipimporter_cache(normalized_path, cache, updater=None):
    """
    Update zipimporter cache data for a given normalized path.

    Any sub-path entries are processed as well, i.e. those corresponding to zip
    archives embedded in other zip archives.

    Given updater is a callable taking a cache entry key and the original entry
    (after already removing the entry from the cache), and expected to update
    the entry and possibly return a new one to be inserted in its place.
    Returning None indicates that the entry should not be replaced with a new
    one. If no updater is given, the cache entries are simply removed without
    any additional processing, the same as if the updater simply returned None.

    """
    for p in _collect_zipimporter_cache_entries(normalized_path, cache):
        # N.B. pypy's custom zipimport._zip_directory_cache implementation does
        # not support the complete dict interface:
        # * Does not support item assignment, thus not allowing this function
        #   to be used only for removing existing cache entries.
        # * Does not support the dict.pop() method, forcing us to use the
        #   get/del patterns instead. For more detailed information see the
        #   following links:
        #     https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
        #     http://bit.ly/2h9itJX
        old_entry = cache[p]
        del cache[p]
        new_entry = updater and updater(p, old_entry)
        if new_entry is not None:
            cache[p] = new_entry
+
+
def _uncache(normalized_path, cache):
    # No updater given, so matching entries are simply removed.
    _update_zipimporter_cache(normalized_path, cache)
+
+
def _remove_and_clear_zip_directory_cache_data(normalized_path):
    # Empty each stale directory-info dict in place (so any live zipimporter
    # still holding a reference sees it cleared), then let the None return
    # value cause the entry itself to be dropped from the cache.
    def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
        old_entry.clear()

    _update_zipimporter_cache(
        normalized_path, zipimport._zip_directory_cache,
        updater=clear_and_remove_cached_zip_archive_directory_data)
+
+
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances laying around, attempting to use them
# will fail due to not having its zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
    _replace_zip_directory_cache_data = \
        _remove_and_clear_zip_directory_cache_data
else:

    def _replace_zip_directory_cache_data(normalized_path):
        def replace_cached_zip_archive_directory_data(path, old_entry):
            # N.B. In theory, we could load the zip directory information just
            # once for all updated path spellings, and then copy it locally and
            # update its contained path strings to contain the correct
            # spelling, but that seems like a way too invasive move (this cache
            # structure is not officially documented anywhere and could in
            # theory change with new Python releases) for no significant
            # benefit.
            old_entry.clear()
            # constructing a fresh zipimporter repopulates the cache entry
            # for `path`; copy that data back into the shared dict so all
            # existing zipimporter instances pick it up
            zipimport.zipimporter(path)
            old_entry.update(zipimport._zip_directory_cache[path])
            return old_entry

        _update_zipimporter_cache(
            normalized_path, zipimport._zip_directory_cache,
            updater=replace_cached_zip_archive_directory_data)
+
+
+def is_python(text, filename='<string>'):
+    "Is this string a valid Python script?"
+    try:
+        compile(text, filename, 'exec')
+    except (SyntaxError, TypeError):
+        return False
+    else:
+        return True
+
+
def is_sh(executable):
    """Determine if the specified executable is a .sh (contains a #! line)"""
    # latin-1 maps every byte, so arbitrary binaries decode without error
    try:
        fp = io.open(executable, encoding='latin-1')
    except (OSError, IOError):
        # unreadable: fall back to returning the path itself (truthy)
        return executable
    with fp:
        return fp.read(2) == '#!'
+
+
def nt_quote_arg(arg):
    """Quote a command line argument according to Windows parsing rules"""
    # delegate to the stdlib, which implements the MS C runtime quoting rules
    return subprocess.list2cmdline([arg])
+
+
def is_python_script(script_text, filename):
    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
    """
    # extension says it's Python
    if filename.endswith(('.py', '.pyw')):
        return True
    # it's syntactically valid Python
    if is_python(script_text, filename):
        return True
    if script_text.startswith('#!'):
        # It begins with a '#!' line, so check if 'python' is in it somewhere
        first_line = script_text.splitlines()[0]
        return 'python' in first_line.lower()

    # Not any Python I can recognize
    return False
+
+
try:
    from os import chmod as _chmod
except ImportError:
    # Jython compatibility: os.chmod may not exist there
    def _chmod(*args):
        pass


def chmod(path, mode):
    """Best-effort chmod: failures are logged at debug level, not raised."""
    log.debug("changing mode of %s to %o", path, mode)
    try:
        _chmod(path, mode)
    except os.error as e:
        log.debug("chmod failed: %s", e)
+
+
class CommandSpec(list):
    """
    A command spec for a #! header, specified as a list of arguments akin to
    those passed to Popen.
    """

    # extra interpreter options appended after the command (set by
    # install_options)
    options = []
    # keyword arguments forwarded to shlex.split in from_string
    split_args = dict()

    @classmethod
    def best(cls):
        """
        Choose the best CommandSpec class based on environmental conditions.
        """
        return cls

    @classmethod
    def _sys_executable(cls):
        """Return the interpreter path, honoring __PYVENV_LAUNCHER__."""
        _default = os.path.normpath(sys.executable)
        return os.environ.get('__PYVENV_LAUNCHER__', _default)

    @classmethod
    def from_param(cls, param):
        """
        Construct a CommandSpec from a parameter to build_scripts, which may
        be None.
        """
        if isinstance(param, cls):
            return param
        if isinstance(param, list):
            return cls(param)
        if param is None:
            return cls.from_environment()
        # otherwise, assume it's a string.
        return cls.from_string(param)

    @classmethod
    def from_environment(cls):
        """Construct a spec naming the current interpreter."""
        return cls([cls._sys_executable()])

    @classmethod
    def from_string(cls, string):
        """
        Construct a command spec from a simple string representing a command
        line parseable by shlex.split.
        """
        items = shlex.split(string, **cls.split_args)
        return cls(items)

    def install_options(self, script_text):
        """Capture interpreter options from the script's #! line; add -x
        when the resulting command line is not pure ASCII."""
        self.options = shlex.split(self._extract_options(script_text))
        cmdline = subprocess.list2cmdline(self)
        if not isascii(cmdline):
            self.options[:0] = ['-x']

    @staticmethod
    def _extract_options(orig_script):
        """
        Extract any options from the first line of the script.
        """
        first = (orig_script + '\n').splitlines()[0]
        match = _first_line_re().match(first)
        options = match.group(1) or '' if match else ''
        return options.strip()

    def as_header(self):
        """Render this spec as a '#!...' header line."""
        return self._render(self + list(self.options))

    @staticmethod
    def _strip_quotes(item):
        # remove one matching pair of surrounding quotes, if present
        _QUOTES = '"\''
        for q in _QUOTES:
            if item.startswith(q) and item.endswith(q):
                return item[1:-1]
        return item

    @staticmethod
    def _render(items):
        """Join items into a single '#!' line (Windows quoting rules)."""
        cmdline = subprocess.list2cmdline(
            CommandSpec._strip_quotes(item.strip()) for item in items)
        return '#!' + cmdline + '\n'
+
+
# For pbr compat; will be removed in a future version.
# (pbr reads this module-level name directly.)
sys_executable = CommandSpec._sys_executable()
+
+
class WindowsCommandSpec(CommandSpec):
    # Windows command lines are not POSIX: split with posix=False so
    # backslashes and quoting survive shlex.split.
    split_args = dict(posix=False)
+
+
class ScriptWriter(object):
    """
    Encapsulates behavior around writing entry point scripts for console and
    gui apps.
    """

    # stub script body; %-interpolated with spec/group/name in get_args
    template = textwrap.dedent(r"""
        # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
        __requires__ = %(spec)r
        import re
        import sys
        from pkg_resources import load_entry_point

        if __name__ == '__main__':
            sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
            sys.exit(
                load_entry_point(%(spec)r, %(group)r, %(name)r)()
            )
    """).lstrip()

    # class used to build the #! header; overridden on Windows
    command_spec_class = CommandSpec

    @classmethod
    def get_script_args(cls, dist, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_args", DeprecationWarning)
        writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
        header = cls.get_script_header("", executable, wininst)
        return writer.get_args(dist, header)

    @classmethod
    def get_script_header(cls, script_text, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_header", DeprecationWarning)
        if wininst:
            executable = "python.exe"
        cmd = cls.command_spec_class.best().from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()

    @classmethod
    def get_args(cls, dist, header=None):
        """
        Yield write_script() argument tuples for a distribution's
        console_scripts and gui_scripts entry points.
        """
        if header is None:
            header = cls.get_header()
        spec = str(dist.as_requirement())
        for type_ in 'console', 'gui':
            group = type_ + '_scripts'
            for name, ep in dist.get_entry_map(group).items():
                cls._ensure_safe_name(name)
                # template interpolation picks up spec/group/name from locals
                script_text = cls.template % locals()
                args = cls._get_script_args(type_, name, header, script_text)
                for res in args:
                    yield res

    @staticmethod
    def _ensure_safe_name(name):
        """
        Prevent paths in *_scripts entry point names.
        """
        has_path_sep = re.search(r'[\\/]', name)
        if has_path_sep:
            raise ValueError("Path separators not allowed in script names")

    @classmethod
    def get_writer(cls, force_windows):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return WindowsScriptWriter.best() if force_windows else cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter for this environment.
        """
        if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
            return WindowsScriptWriter.best()
        else:
            return cls

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        # Simply write the stub with no extension.
        yield (name, header + script_text)

    @classmethod
    def get_header(cls, script_text="", executable=None):
        """Create a #! line, getting options (if any) from script_text"""
        cmd = cls.command_spec_class.best().from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()
+
+
class WindowsScriptWriter(ScriptWriter):
    command_spec_class = WindowsCommandSpec

    @classmethod
    def get_writer(cls):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter suitable for Windows
        """
        writer_lookup = dict(
            executable=WindowsExecutableLauncherWriter,
            natural=cls,
        )
        # for compatibility, use the executable launcher by default
        launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
        return writer_lookup[launcher]

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        "For Windows, add a .py extension"
        # NOTE(review): console scripts get '.pya' (not '.py') — presumably
        # to avoid clashing with a same-named module; confirm upstream intent.
        ext = dict(console='.pya', gui='.pyw')[type_]
        if ext not in os.environ['PATHEXT'].lower().split(';'):
            msg = (
                "{ext} not listed in PATHEXT; scripts will not be "
                "recognized as executables."
            ).format(**locals())
            warnings.warn(msg, UserWarning)
        # names with any of these other extensions would shadow the script
        old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
        old.remove(ext)
        header = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        yield name + ext, header + script_text, 't', blockers

    @classmethod
    def _adjust_header(cls, type_, orig_header):
        """
        Make sure 'pythonw' is used for gui and 'python' is used for
        console (regardless of what sys.executable is).
        """
        pattern = 'pythonw.exe'
        repl = 'python.exe'
        if type_ == 'gui':
            pattern, repl = repl, pattern
        pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
        new_header = pattern_ob.sub(string=orig_header, repl=repl)
        return new_header if cls._use_header(new_header) else orig_header

    @staticmethod
    def _use_header(new_header):
        """
        Should _adjust_header use the replaced header?

        On non-windows systems, always use. On
        Windows systems, only use the replaced header if it resolves
        to an executable on the system.
        """
        # strip the '#!' prefix, trailing newline, and any surrounding quotes
        clean_header = new_header[2:-1].strip('"')
        return sys.platform != 'win32' or find_executable(clean_header)
+
+
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        """
        For Windows, add a .py extension and an .exe launcher
        """
        if type_ == 'gui':
            launcher_type = 'gui'
            ext = '-script.pyw'
            old = ['.pyw']
        else:
            launcher_type = 'cli'
            ext = '-script.py'
            old = ['.py', '.pyc', '.pyo']
        hdr = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        # the actual script, invoked by the launcher below
        yield (name + ext, hdr + script_text, 't', blockers)
        yield (
            name + '.exe', get_win_launcher(launcher_type),
            'b'  # write in binary mode
        )
        if not is_64bit():
            # install a manifest for the launcher to prevent Windows
            # from detecting it as an installer (which it will for
            # launchers like easy_install.exe). Consider only
            # adding a manifest for launchers detected as installers.
            # See Distribute #143 for details.
            m_name = name + '.exe.manifest'
            yield (m_name, load_launcher_manifest(name), 't')
+
+
# for backward-compatibility
# (both methods emit a DeprecationWarning pointing at their replacements)
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
+
+
def get_win_launcher(type):
    """
    Load the Windows launcher (executable) suitable for launching a script.

    `type` should be either 'cli' or 'gui'

    Returns the executable as a byte string.
    """
    # pick the bitness-specific resource: e.g. 'cli-64.exe' or 'cli-32.exe'
    suffix = '-64.' if is_64bit() else '-32.'
    launcher_fn = ('%s.exe' % type).replace('.', suffix)
    return resource_string('setuptools', launcher_fn)
+
+
def load_launcher_manifest(name):
    """Return the launcher manifest XML with %(name)s interpolated."""
    manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
    if six.PY2:
        return manifest % vars()
    else:
        # resource_string returns bytes; decode before %-interpolating so
        # the result is text on Python 3
        return manifest.decode('utf-8') % vars()
+
+
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
    """shutil.rmtree whose default error handler retries Windows read-only
    deletions (see auto_chmod)."""
    return shutil.rmtree(path, ignore_errors, onerror)
+
+
def current_umask():
    """Return the process umask without permanently changing it."""
    # os.umask offers no read-only query: set a temporary value to read the
    # old one, then immediately restore it.
    original = os.umask(0o022)
    os.umask(original)
    return original
+
+
def bootstrap():
    # This function is called when setuptools*.egg is run using /bin/sh
    import setuptools

    # use the egg's own directory both as argv[0] and as the thing to install
    argv0 = os.path.dirname(setuptools.__path__[0])
    sys.argv[0] = argv0
    sys.argv.append(argv0)
    main()
+
+
def main(argv=None, **kw):
    """Run the easy_install command-line tool with the given arguments."""
    from setuptools import setup
    from setuptools.dist import Distribution

    class DistributionWithoutHelpCommands(Distribution):
        # suppress the generic distutils usage banner
        common_usage = ""

        def _show_help(self, *args, **kw):
            # show help with the easy_install-specific usage text
            with _patch_usage():
                Distribution._show_help(self, *args, **kw)

    if argv is None:
        argv = sys.argv[1:]

    with _patch_usage():
        setup(
            script_args=['-q', 'easy_install', '-v'] + argv,
            script_name=sys.argv[0] or 'easy_install',
            distclass=DistributionWithoutHelpCommands,
            **kw
        )
+
+
+@contextlib.contextmanager
+def _patch_usage():
+    import distutils.core
+    USAGE = textwrap.dedent("""
+        usage: %(script)s [options] requirement_or_url ...
+           or: %(script)s --help
+        """).lstrip()
+
+    def gen_usage(script_name):
+        return USAGE % dict(
+            script=os.path.basename(script_name),
+        )
+
+    saved = distutils.core.gen_usage
+    distutils.core.gen_usage = gen_usage
+    try:
+        yield
+    finally:
+        distutils.core.gen_usage = saved
diff --git a/setuptools/command/egg_info.py b/setuptools/command/egg_info.py
new file mode 100755
index 0000000..f3e604d
--- /dev/null
+++ b/setuptools/command/egg_info.py
@@ -0,0 +1,696 @@
+"""setuptools.command.egg_info
+
+Create a distribution's .egg-info directory and contents"""
+
+from distutils.filelist import FileList as _FileList
+from distutils.errors import DistutilsInternalError
+from distutils.util import convert_path
+from distutils import log
+import distutils.errors
+import distutils.filelist
+import os
+import re
+import sys
+import io
+import warnings
+import time
+import collections
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import map
+
+from setuptools import Command
+from setuptools.command.sdist import sdist
+from setuptools.command.sdist import walk_revctrl
+from setuptools.command.setopt import edit_config
+from setuptools.command import bdist_egg
+from pkg_resources import (
+    parse_requirements, safe_name, parse_version,
+    safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
+import setuptools.unicode_utils as unicode_utils
+from setuptools.glob import glob
+
+from setuptools.extern import packaging
+
+
def _translate_chunk(chunk, not_sep):
    """Translate one path component of a glob into a regex fragment.

    ``not_sep`` is a regex atom matching any single non-separator character.
    """
    fragment = ''
    i = 0
    length = len(chunk)
    while i < length:
        ch = chunk[i]
        if ch == '*':
            # any run of name characters (never crosses a separator)
            fragment += not_sep + '*'
        elif ch == '?':
            # exactly one name character
            fragment += not_sep
        elif ch == '[':
            # possible [character class]; find its closing bracket
            end = i + 1
            # a leading '!' and/or ']' is part of the class contents
            if end < length and chunk[end] == '!':
                end += 1
            if end < length and chunk[end] == ']':
                end += 1
            while end < length and chunk[end] != ']':
                end += 1

            if end >= length:
                # unterminated class: treat the '[' as a literal
                fragment += re.escape(ch)
            else:
                inner = chunk[i + 1:end]
                negated = inner.startswith('!')
                if negated:
                    inner = inner[1:]
                fragment += '[%s%s]' % ('^' if negated else '', re.escape(inner))
                # resume scanning after the closing ']'
                i = end
        else:
            fragment += re.escape(ch)
        i += 1
    return fragment


def translate_pattern(glob):
    """
    Translate a file path glob like '*.txt' in to a regular expression.
    This differs from fnmatch.translate which allows wildcards to match
    directory separators. It also knows about '**/' which matches any number of
    directories.
    """
    sep = re.escape(os.sep)
    not_sep = '[^%s]' % (sep,)

    # This will split on the path separator even within [character classes].
    # That is deliberate and matches historical behavior.
    chunks = glob.split(os.path.sep)

    parts = []
    for index, chunk in enumerate(chunks):
        is_last = index == len(chunks) - 1

        # A component that is exactly '**' is a globstar.
        if chunk == '**':
            if is_last:
                # trailing globstar matches anything at all
                parts.append('.*')
            else:
                # any number of 'name/' groups (separator lives inside
                # the group, so no extra separator is appended)
                parts.append('(?:%s+%s)*' % (not_sep, sep))
            continue

        parts.append(_translate_chunk(chunk, not_sep))
        if not is_last:
            parts.append(sep)

    return re.compile(''.join(parts) + r'\Z', flags=re.MULTILINE | re.DOTALL)
+
+
class egg_info(Command):
    """Create and populate a distribution's ``<name>.egg-info`` directory.

    The individual metadata files are produced by writers registered under
    the ``egg_info.writers`` entry point group (see ``run()``).
    """

    description = "create a distribution's .egg-info directory"

    user_options = [
        ('egg-base=', 'e', "directory containing .egg-info directories"
                           " (default: top of the source tree)"),
        ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
        ('tag-build=', 'b', "Specify explicit tag to add to version number"),
        ('no-date', 'D', "Don't include date stamp [default]"),
    ]

    boolean_options = ['tag-date']
    negative_opt = {
        'no-date': 'tag-date',
    }

    def initialize_options(self):
        # Distribution identity; computed in finalize_options().
        self.egg_name = None
        self.egg_version = None
        # Directory containing the .egg-info dir, and its resulting path.
        self.egg_base = None
        self.egg_info = None
        # Version-tag options (settable via setup.cfg or the command line).
        self.tag_build = None
        self.tag_date = 0
        # Set by check_broken_egg_info() when the egg name contains a '-'.
        self.broken_egg_info = False
        # Concatenated version tags; computed in finalize_options().
        self.vtags = None

    ####################################
    # allow the 'tag_svn_revision' to be detected and
    # set, supporting sdists built on older Setuptools.
    @property
    def tag_svn_revision(self):
        # deliberately a no-op: the option is accepted but ignored
        pass

    @tag_svn_revision.setter
    def tag_svn_revision(self, value):
        # deliberately a no-op: the option is accepted but ignored
        pass
    ####################################

    def save_version_info(self, filename):
        """
        Materialize the value of date into the
        build tag. Install build keys in a deterministic order
        to avoid arbitrary reordering on subsequent builds.
        """
        egg_info = collections.OrderedDict()
        # follow the order these keys would have been added
        # when PYTHONHASHSEED=0
        egg_info['tag_build'] = self.tags()
        egg_info['tag_date'] = 0
        edit_config(filename, dict(egg_info=egg_info))

    def finalize_options(self):
        """Compute and validate the egg name/version, locate .egg-info."""
        self.egg_name = safe_name(self.distribution.get_name())
        self.vtags = self.tags()
        self.egg_version = self.tagged_version()

        parsed_version = parse_version(self.egg_version)

        try:
            # A PEP 440 version round-trips through '=='; anything else
            # needs the arbitrary-equality '===' operator.
            is_version = isinstance(parsed_version, packaging.version.Version)
            spec = (
                "%s==%s" if is_version else "%s===%s"
            )
            # parsing the requirement validates both name and version
            list(
                parse_requirements(spec % (self.egg_name, self.egg_version))
            )
        except ValueError:
            raise distutils.errors.DistutilsOptionError(
                "Invalid distribution name or version syntax: %s-%s" %
                (self.egg_name, self.egg_version)
            )

        if self.egg_base is None:
            # default to the root package dir, or the current directory
            dirs = self.distribution.package_dir
            self.egg_base = (dirs or {}).get('', os.curdir)

        self.ensure_dirname('egg_base')
        self.egg_info = to_filename(self.egg_name) + '.egg-info'
        if self.egg_base != os.curdir:
            self.egg_info = os.path.join(self.egg_base, self.egg_info)
        if '-' in self.egg_name:
            self.check_broken_egg_info()

        # Set package version for the benefit of dumber commands
        # (e.g. sdist, bdist_wininst, etc.)
        #
        self.distribution.metadata.version = self.egg_version

        # If we bootstrapped around the lack of a PKG-INFO, as might be the
        # case in a fresh checkout, make sure that any special tags get added
        # to the version info
        #
        pd = self.distribution._patched_dist
        if pd is not None and pd.key == self.egg_name.lower():
            pd._version = self.egg_version
            pd._parsed_version = parse_version(self.egg_version)
            self.distribution._patched_dist = None

    def write_or_delete_file(self, what, filename, data, force=False):
        """Write `data` to `filename` or delete if empty

        If `data` is non-empty, this routine is the same as ``write_file()``.
        If `data` is empty but not ``None``, this is the same as calling
        ``delete_file(filename)``.  If `data` is ``None``, then this is a no-op
        unless `filename` exists, in which case a warning is issued about the
        orphaned file (if `force` is false), or deleted (if `force` is true).
        """
        if data:
            self.write_file(what, filename, data)
        elif os.path.exists(filename):
            if data is None and not force:
                log.warn(
                    "%s not set in setup(), but %s exists", what, filename
                )
                return
            else:
                self.delete_file(filename)

    def write_file(self, what, filename, data):
        """Write `data` to `filename` (if not a dry run) after announcing it

        `what` is used in a log message to identify what is being written
        to the file.
        """
        log.info("writing %s to %s", what, filename)
        if six.PY3:
            # file is opened in binary mode below, so encode text first
            data = data.encode("utf-8")
        if not self.dry_run:
            f = open(filename, 'wb')
            f.write(data)
            f.close()

    def delete_file(self, filename):
        """Delete `filename` (if not a dry run) after announcing it"""
        log.info("deleting %s", filename)
        if not self.dry_run:
            os.unlink(filename)

    def tagged_version(self):
        """Return the distribution version with the configured tags applied."""
        version = self.distribution.get_version()
        # egg_info may be called more than once for a distribution,
        # in which case the version string already contains all tags.
        if self.vtags and version.endswith(self.vtags):
            return safe_version(version)
        return safe_version(version + self.vtags)

    def run(self):
        """Create the .egg-info dir and run every registered metadata writer."""
        self.mkpath(self.egg_info)
        installer = self.distribution.fetch_build_egg
        for ep in iter_entry_points('egg_info.writers'):
            # make sure the writer's own dependencies are available
            ep.require(installer=installer)
            writer = ep.resolve()
            writer(self, ep.name, os.path.join(self.egg_info, ep.name))

        # Get rid of native_libs.txt if it was put there by older bdist_egg
        nl = os.path.join(self.egg_info, "native_libs.txt")
        if os.path.exists(nl):
            self.delete_file(nl)

        self.find_sources()

    def tags(self):
        """Return the version tag suffix built from tag_build/tag_date."""
        version = ''
        if self.tag_build:
            version += self.tag_build
        if self.tag_date:
            version += time.strftime("-%Y%m%d")
        return version

    def find_sources(self):
        """Generate SOURCES.txt manifest file"""
        manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
        mm = manifest_maker(self.distribution)
        mm.manifest = manifest_filename
        mm.run()
        self.filelist = mm.filelist

    def check_broken_egg_info(self):
        """Warn about (and fall back to) a legacy '-'-named .egg-info dir."""
        bei = self.egg_name + '.egg-info'
        if self.egg_base != os.curdir:
            bei = os.path.join(self.egg_base, bei)
        if os.path.exists(bei):
            log.warn(
                "-" * 78 + '\n'
                "Note: Your current .egg-info directory has a '-' in its name;"
                '\nthis will not work correctly with "setup.py develop".\n\n'
                'Please rename %s to %s to correct this problem.\n' + '-' * 78,
                bei, self.egg_info
            )
            self.broken_egg_info = self.egg_info
            self.egg_info = bei  # make it work for now
+
+
class FileList(_FileList):
    """distutils FileList with setuptools' glob semantics and path safety.

    Implementations of the various MANIFEST.in commands; patterns use
    ``translate_pattern`` so wildcards never cross directory separators.
    """

    def process_template_line(self, line):
        """Execute a single MANIFEST.in template line."""
        # Parse the line: split it up, make sure the right number of words
        # is there, and return the relevant words.  'action' is always
        # defined: it's the first word of the line.  Which of the other
        # three are defined depends on the action; it'll be either
        # patterns, (dir and patterns), or (dir_pattern).
        (action, patterns, dir, dir_pattern) = self._parse_template_line(line)

        # OK, now we know that the action is valid and we have the
        # right number of words on the line for that action -- so we
        # can proceed with minimal error-checking.
        if action == 'include':
            self.debug_print("include " + ' '.join(patterns))
            for pattern in patterns:
                if not self.include(pattern):
                    log.warn("warning: no files found matching '%s'", pattern)

        elif action == 'exclude':
            self.debug_print("exclude " + ' '.join(patterns))
            for pattern in patterns:
                if not self.exclude(pattern):
                    log.warn(("warning: no previously-included files "
                              "found matching '%s'"), pattern)

        elif action == 'global-include':
            self.debug_print("global-include " + ' '.join(patterns))
            for pattern in patterns:
                if not self.global_include(pattern):
                    log.warn(("warning: no files found matching '%s' "
                              "anywhere in distribution"), pattern)

        elif action == 'global-exclude':
            self.debug_print("global-exclude " + ' '.join(patterns))
            for pattern in patterns:
                if not self.global_exclude(pattern):
                    log.warn(("warning: no previously-included files matching "
                              "'%s' found anywhere in distribution"),
                             pattern)

        elif action == 'recursive-include':
            self.debug_print("recursive-include %s %s" %
                             (dir, ' '.join(patterns)))
            for pattern in patterns:
                if not self.recursive_include(dir, pattern):
                    log.warn(("warning: no files found matching '%s' "
                              "under directory '%s'"),
                             pattern, dir)

        elif action == 'recursive-exclude':
            self.debug_print("recursive-exclude %s %s" %
                             (dir, ' '.join(patterns)))
            for pattern in patterns:
                if not self.recursive_exclude(dir, pattern):
                    log.warn(("warning: no previously-included files matching "
                              "'%s' found under directory '%s'"),
                             pattern, dir)

        elif action == 'graft':
            self.debug_print("graft " + dir_pattern)
            if not self.graft(dir_pattern):
                log.warn("warning: no directories found matching '%s'",
                         dir_pattern)

        elif action == 'prune':
            self.debug_print("prune " + dir_pattern)
            if not self.prune(dir_pattern):
                log.warn(("no previously-included directories found "
                          "matching '%s'"), dir_pattern)

        else:
            # _parse_template_line already validated the action
            raise DistutilsInternalError(
                "this cannot happen: invalid action '%s'" % action)

    def _remove_files(self, predicate):
        """
        Remove all files from the file list that match the predicate.
        Return True if any matching files were removed
        """
        found = False
        # iterate backwards so deletions don't shift pending indices
        for i in range(len(self.files) - 1, -1, -1):
            if predicate(self.files[i]):
                self.debug_print(" removing " + self.files[i])
                del self.files[i]
                found = True
        return found

    def include(self, pattern):
        """Include files that match 'pattern'."""
        found = [f for f in glob(pattern) if not os.path.isdir(f)]
        self.extend(found)
        return bool(found)

    def exclude(self, pattern):
        """Exclude files that match 'pattern'."""
        match = translate_pattern(pattern)
        return self._remove_files(match.match)

    def recursive_include(self, dir, pattern):
        """
        Include all files anywhere in 'dir/' that match the pattern.
        """
        full_pattern = os.path.join(dir, '**', pattern)
        found = [f for f in glob(full_pattern, recursive=True)
                 if not os.path.isdir(f)]
        self.extend(found)
        return bool(found)

    def recursive_exclude(self, dir, pattern):
        """
        Exclude any file anywhere in 'dir/' that match the pattern.
        """
        match = translate_pattern(os.path.join(dir, '**', pattern))
        return self._remove_files(match.match)

    def graft(self, dir):
        """Include all files from 'dir/'."""
        found = [
            item
            for match_dir in glob(dir)
            for item in distutils.filelist.findall(match_dir)
        ]
        self.extend(found)
        return bool(found)

    def prune(self, dir):
        """Filter out files from 'dir/'."""
        match = translate_pattern(os.path.join(dir, '**'))
        return self._remove_files(match.match)

    def global_include(self, pattern):
        """
        Include all files anywhere in the current directory that match the
        pattern. This is very inefficient on large file trees.
        """
        if self.allfiles is None:
            self.findall()
        match = translate_pattern(os.path.join('**', pattern))
        found = [f for f in self.allfiles if match.match(f)]
        self.extend(found)
        return bool(found)

    def global_exclude(self, pattern):
        """
        Exclude all files anywhere that match the pattern.
        """
        match = translate_pattern(os.path.join('**', pattern))
        return self._remove_files(match.match)

    def append(self, item):
        """Append a single path, normalizing it and dropping unsafe ones."""
        if item.endswith('\r'):  # Fix older sdists built on Windows
            item = item[:-1]
        path = convert_path(item)

        if self._safe_path(path):
            self.files.append(path)

    def extend(self, paths):
        """Append multiple paths, keeping only those that are safe."""
        self.files.extend(filter(self._safe_path, paths))

    def _repair(self):
        """
        Replace self.files with only safe paths

        Because some owners of FileList manipulate the underlying
        ``files`` attribute directly, this method must be called to
        repair those paths.
        """
        self.files = list(filter(self._safe_path, self.files))

    def _safe_path(self, path):
        """Return a truthy value only for decodable, existing paths.

        Note: falls through (returning None, which is falsy) when neither
        the decoded nor the utf-8 form of the path exists.
        """
        enc_warn = "'%s' not %s encodable -- skipping"

        # To avoid accidental trans-codings errors, first to unicode
        u_path = unicode_utils.filesys_decode(path)
        if u_path is None:
            log.warn("'%s' in unexpected encoding -- skipping" % path)
            return False

        # Must ensure utf-8 encodability
        utf8_path = unicode_utils.try_encode(u_path, "utf-8")
        if utf8_path is None:
            log.warn(enc_warn, path, 'utf-8')
            return False

        try:
            # accept is either way checks out
            if os.path.exists(u_path) or os.path.exists(utf8_path):
                return True
        # this will catch any encode errors decoding u_path
        except UnicodeEncodeError:
            log.warn(enc_warn, path, sys.getfilesystemencoding())
+
+
class manifest_maker(sdist):
    """sdist variant that only (re)generates the SOURCES.txt manifest."""

    template = "MANIFEST.in"

    def initialize_options(self):
        # behave like `sdist --manifest-only --force-manifest` with
        # defaults and pruning enabled
        self.use_defaults = 1
        self.prune = 1
        self.manifest_only = 1
        self.force_manifest = 1

    def finalize_options(self):
        # nothing to finalize; options are fixed in initialize_options
        pass

    def run(self):
        """Build the file list and write it to the manifest."""
        self.filelist = FileList()
        if not os.path.exists(self.manifest):
            self.write_manifest()  # it must exist so it'll get in the list
        self.add_defaults()
        if os.path.exists(self.template):
            self.read_template()
        self.prune_file_list()
        self.filelist.sort()
        self.filelist.remove_duplicates()
        self.write_manifest()

    def _manifest_normalize(self, path):
        # manifests are always written with POSIX-style separators
        path = unicode_utils.filesys_decode(path)
        return path.replace(os.sep, '/')

    def write_manifest(self):
        """
        Write the file list in 'self.filelist' to the manifest file
        named by 'self.manifest'.
        """
        self.filelist._repair()

        # _repair() ensured the paths are utf-8 encodable; now normalize
        # them to '/'-separated text for the manifest
        files = [self._manifest_normalize(f) for f in self.filelist.files]
        msg = "writing manifest file '%s'" % self.manifest
        self.execute(write_file, (self.manifest, files), msg)

    def warn(self, msg):
        # drop warnings that _should_suppress_warning matches
        if not self._should_suppress_warning(msg):
            sdist.warn(self, msg)

    @staticmethod
    def _should_suppress_warning(msg):
        """
        suppress missing-file warnings from sdist
        """
        return re.match(r"standard file .*not found", msg)

    def add_defaults(self):
        """Add sdist defaults plus the template, manifest and VCS files."""
        sdist.add_defaults(self)
        self.filelist.append(self.template)
        self.filelist.append(self.manifest)
        rcfiles = list(walk_revctrl())
        if rcfiles:
            self.filelist.extend(rcfiles)
        elif os.path.exists(self.manifest):
            # no revision control info: fall back to the existing manifest
            self.read_manifest()
        ei_cmd = self.get_finalized_command('egg_info')
        self.filelist.graft(ei_cmd.egg_info)

    def prune_file_list(self):
        """Drop build output, the dist dir, and VCS metadata directories."""
        build = self.get_finalized_command('build')
        base_dir = self.distribution.get_fullname()
        self.filelist.prune(build.build_base)
        self.filelist.prune(base_dir)
        sep = re.escape(os.sep)
        self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
                                      is_regex=1)
+
+
def write_file(filename, contents):
    """Create a file with the specified name and write 'contents' (a
    sequence of strings without line terminators) to it.
    """
    # assuming the contents has been vetted for utf-8 encoding
    payload = "\n".join(contents).encode("utf-8")

    # binary mode keeps the manifest POSIX-style on every platform
    with open(filename, "wb") as fp:
        fp.write(payload)
+
+
def write_pkg_info(cmd, basename, filename):
    """egg_info.writers entry point: emit PKG-INFO plus the zip-safe flag."""
    log.info("writing %s", filename)
    if cmd.dry_run:
        return

    metadata = cmd.distribution.metadata
    # temporarily substitute the egg's safe name/version
    saved_version = metadata.version
    saved_name = metadata.name
    metadata.version = cmd.egg_version
    metadata.name = cmd.egg_name

    try:
        # write unescaped data to PKG-INFO, so older pkg_resources
        # can still parse it
        metadata.write_pkg_info(cmd.egg_info)
    finally:
        # always restore the distribution's own metadata
        metadata.name = saved_name
        metadata.version = saved_version

    safe = getattr(cmd.distribution, 'zip_safe', None)
    bdist_egg.write_safety_flag(cmd.egg_info, safe)
+
+
def warn_depends_obsolete(cmd, basename, filename):
    """egg_info.writers entry point: warn if a legacy depends.txt exists."""
    if not os.path.exists(filename):
        return
    log.warn(
        "WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
        "Use the install_requires/extras_require setup() args instead."
    )
+
+
def _write_requirements(stream, reqs):
    """Write one requirement per line from *reqs* to *stream*."""
    for line in yield_lines(reqs or ()):
        stream.write(line + '\n')
+
+
def write_requirements(cmd, basename, filename):
    """egg_info.writers entry point: emit requires.txt (install + extras)."""
    dist = cmd.distribution
    buf = six.StringIO()
    _write_requirements(buf, dist.install_requires)
    extras = dist.extras_require or {}
    for extra in sorted(extras):
        # each extra gets its own [section]
        buf.write('\n[{extra}]\n'.format(extra=extra))
        _write_requirements(buf, extras[extra])
    cmd.write_or_delete_file("requirements", filename, buf.getvalue())
+
+
def write_setup_requirements(cmd, basename, filename):
    """egg_info.writers entry point: emit setup_requires.txt."""
    buf = io.StringIO()
    _write_requirements(buf, cmd.distribution.setup_requires)
    cmd.write_or_delete_file("setup-requirements", filename, buf.getvalue())
+
+
def write_toplevel_names(cmd, basename, filename):
    """egg_info.writers entry point: emit top_level.txt (root packages)."""
    roots = {
        name.split('.', 1)[0]
        for name in cmd.distribution.iter_distribution_names()
    }
    cmd.write_file("top-level names", filename, '\n'.join(sorted(roots)) + '\n')
+
+
def overwrite_arg(cmd, basename, filename):
    """Like write_arg, but force deletion of the file when the arg is unset."""
    write_arg(cmd, basename, filename, force=True)
+
+
def write_arg(cmd, basename, filename, force=False):
    """Write the distribution attribute named after *basename* to *filename*.

    A ``None`` attribute value is passed through so write_or_delete_file can
    warn about (or, with *force*, delete) an orphaned file.
    """
    argname = os.path.splitext(basename)[0]
    value = getattr(cmd.distribution, argname, None)
    data = None if value is None else '\n'.join(value) + '\n'
    cmd.write_or_delete_file(argname, filename, data, force)
+
+
def write_entries(cmd, basename, filename):
    """egg_info.writers entry point: emit entry_points.txt."""
    ep = cmd.distribution.entry_points

    if ep is None or isinstance(ep, six.string_types):
        # already-rendered text (or nothing at all) passes straight through
        data = ep
    else:
        sections = []
        for section, contents in sorted(ep.items()):
            if not isinstance(contents, six.string_types):
                # render structured entries via EntryPoint, sorted for
                # deterministic output
                parsed = EntryPoint.parse_group(section, contents)
                contents = '\n'.join(sorted(map(str, parsed.values())))
            sections.append('[%s]\n%s\n\n' % (section, contents))
        data = ''.join(sections)

    cmd.write_or_delete_file('entry points', filename, data, True)
+
+
def get_pkg_info_revision():
    """
    Get a -r### off of PKG-INFO Version in case this is an sdist of
    a subversion revision.
    """
    warnings.warn("get_pkg_info_revision is deprecated.", DeprecationWarning)
    if not os.path.exists('PKG-INFO'):
        return 0
    pattern = re.compile(r"Version:.*-r(\d+)\s*$")
    with io.open('PKG-INFO') as f:
        for line in f:
            match = pattern.match(line)
            if match:
                return int(match.group(1))
    return 0
diff --git a/setuptools/command/install.py b/setuptools/command/install.py
new file mode 100644
index 0000000..31a5ddb
--- /dev/null
+++ b/setuptools/command/install.py
@@ -0,0 +1,125 @@
+from distutils.errors import DistutilsArgError
+import inspect
+import glob
+import warnings
+import platform
+import distutils.command.install as orig
+
+import setuptools
+
# Backward-compatibility alias.
# Prior to numpy 1.9, NumPy relies on the '_install' name, so provide it for
# now. See https://github.com/pypa/setuptools/issues/199/
_install = orig.install
+
+
class install(orig.install):
    """Use easy_install to install the package, w/dependencies"""

    user_options = orig.install.user_options + [
        ('old-and-unmanageable', None, "Try not to use this!"),
        ('single-version-externally-managed', None,
         "used by system package builders to create 'flat' eggs"),
    ]
    boolean_options = orig.install.boolean_options + [
        'old-and-unmanageable', 'single-version-externally-managed',
    ]
    # sub-commands replaced by setuptools' versions; always enabled
    new_commands = [
        ('install_egg_info', lambda self: True),
        ('install_scripts', lambda self: True),
    ]
    _nc = dict(new_commands)

    def initialize_options(self):
        orig.install.initialize_options(self)
        # flags for the two legacy installation modes
        self.old_and_unmanageable = None
        self.single_version_externally_managed = None

    def finalize_options(self):
        orig.install.finalize_options(self)
        if self.root:
            # installing under --root implies the flat, externally-managed
            # layout
            self.single_version_externally_managed = True
        elif self.single_version_externally_managed:
            if not self.root and not self.record:
                raise DistutilsArgError(
                    "You must specify --record or --root when building system"
                    " packages"
                )

    def handle_extra_path(self):
        if self.root or self.single_version_externally_managed:
            # explicit backward-compatibility mode, allow extra_path to work
            return orig.install.handle_extra_path(self)

        # Ignore extra_path when installing an egg (or being run by another
        # command without --root or --single-version-externally-managed
        self.path_file = None
        self.extra_dirs = ''

    def run(self):
        """Dispatch to egg-based or classic distutils installation."""
        # Explicit request for old-style install?  Just do it
        if self.old_and_unmanageable or self.single_version_externally_managed:
            return orig.install.run(self)

        if not self._called_from_setup(inspect.currentframe()):
            # Run in backward-compatibility mode to support bdist_* commands.
            orig.install.run(self)
        else:
            self.do_egg_install()

    @staticmethod
    def _called_from_setup(run_frame):
        """
        Attempt to detect whether run() was called from setup() or by another
        command.  If called by setup(), the parent caller will be the
        'run_command' method in 'distutils.dist', and *its* caller will be
        the 'run_commands' method.  If called any other way, the
        immediate caller *might* be 'run_command', but it won't have been
        called by 'run_commands'. Return True in that case or if a call stack
        is unavailable. Return False otherwise.
        """
        if run_frame is None:
            # e.g. IronPython without -X:Frames; assume setup() and warn
            msg = "Call stack not available. bdist_* commands may fail."
            warnings.warn(msg)
            if platform.python_implementation() == 'IronPython':
                msg = "For best results, pass -X:Frames to enable call stack."
                warnings.warn(msg)
            return True
        # two frames up from run(): the caller of run_command
        res = inspect.getouterframes(run_frame)[2]
        caller, = res[:1]
        info = inspect.getframeinfo(caller)
        caller_module = caller.f_globals.get('__name__', '')
        return (
            caller_module == 'distutils.dist'
            and info.function == 'run_commands'
        )

    def do_egg_install(self):
        """Build a bdist_egg and install it (plus deps) via easy_install."""

        easy_install = self.distribution.get_command_class('easy_install')

        cmd = easy_install(
            self.distribution, args="x", root=self.root, record=self.record,
        )
        cmd.ensure_finalized()  # finalize before bdist_egg munges install cmd
        cmd.always_copy_from = '.'  # make sure local-dir eggs get installed

        # pick up setup-dir .egg files only: no .egg-info
        cmd.package_index.scan(glob.glob('*.egg'))

        self.run_command('bdist_egg')
        args = [self.distribution.get_command_obj('bdist_egg').egg_output]

        if setuptools.bootstrap_install_from:
            # Bootstrap self-installation of setuptools
            args.insert(0, setuptools.bootstrap_install_from)

        cmd.args = args
        cmd.run()
        setuptools.bootstrap_install_from = None
+
+
# Replace distutils' versions of these sub-commands with setuptools' own.
# XXX Python 3.1 doesn't see _nc if this is inside the class
install.sub_commands = (
    [cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
    install.new_commands
)
diff --git a/setuptools/command/install_egg_info.py b/setuptools/command/install_egg_info.py
new file mode 100755
index 0000000..edc4718
--- /dev/null
+++ b/setuptools/command/install_egg_info.py
@@ -0,0 +1,62 @@
+from distutils import log, dir_util
+import os
+
+from setuptools import Command
+from setuptools import namespaces
+from setuptools.archive_util import unpack_archive
+import pkg_resources
+
+
class install_egg_info(namespaces.Installer, Command):
    """Install an .egg-info directory for the package"""

    description = "Install an .egg-info directory for the package"

    user_options = [
        ('install-dir=', 'd', "directory to install to"),
    ]

    def initialize_options(self):
        self.install_dir = None

    def finalize_options(self):
        """Derive the source .egg-info path and the install target path."""
        self.set_undefined_options('install_lib',
                                   ('install_dir', 'install_dir'))
        ei_cmd = self.get_finalized_command("egg_info")
        # target name is the fully tagged egg name plus '.egg-info'
        basename = pkg_resources.Distribution(
            None, None, ei_cmd.egg_name, ei_cmd.egg_version
        ).egg_name() + '.egg-info'
        self.source = ei_cmd.egg_info
        self.target = os.path.join(self.install_dir, basename)
        # filled in by copytree()'s skimmer; reported via get_outputs()
        self.outputs = []

    def run(self):
        """Regenerate egg-info, replace any existing target, copy it over."""
        self.run_command('egg_info')
        if os.path.isdir(self.target) and not os.path.islink(self.target):
            dir_util.remove_tree(self.target, dry_run=self.dry_run)
        elif os.path.exists(self.target):
            self.execute(os.unlink, (self.target,), "Removing " + self.target)
        if not self.dry_run:
            pkg_resources.ensure_directory(self.target)
        self.execute(
            self.copytree, (), "Copying %s to %s" % (self.source, self.target)
        )
        self.install_namespaces()

    def get_outputs(self):
        return self.outputs

    def copytree(self):
        # Copy the .egg-info tree to site-packages
        def skimmer(src, dst):
            # filter out source-control directories; note that 'src' is always
            # a '/'-separated path, regardless of platform.  'dst' is a
            # platform-specific path.
            for skip in '.svn/', 'CVS/':
                if src.startswith(skip) or '/' + skip in src:
                    return None
            self.outputs.append(dst)
            log.debug("Copying %s to %s", src, dst)
            return dst

        unpack_archive(self.source, self.target, skimmer)
diff --git a/setuptools/command/install_lib.py b/setuptools/command/install_lib.py
new file mode 100644
index 0000000..2b31c3e
--- /dev/null
+++ b/setuptools/command/install_lib.py
@@ -0,0 +1,121 @@
+import os
+import imp
+from itertools import product, starmap
+import distutils.command.install_lib as orig
+
+
+class install_lib(orig.install_lib):
+    """Don't add compiled flags to filenames of non-Python files"""
+
+    def run(self):
+        """Build and install, then byte-compile whatever was installed."""
+        self.build()
+        outfiles = self.install()
+        if outfiles is not None:
+            # always compile, in case we have any extension stubs to deal with
+            self.byte_compile(outfiles)
+
+    def get_exclusions(self):
+        """
+        Return a collections.Sized collections.Container of paths to be
+        excluded for single_version_externally_managed installations.
+        """
+        # Every parent package of each namespace package is also a
+        # candidate for exclusion (see _all_packages).
+        all_packages = (
+            pkg
+            for ns_pkg in self._get_SVEM_NSPs()
+            for pkg in self._all_packages(ns_pkg)
+        )
+
+        # Cross every candidate package with every exclusion filename.
+        excl_specs = product(all_packages, self._gen_exclusion_paths())
+        return set(starmap(self._exclude_pkg_path, excl_specs))
+
+    def _exclude_pkg_path(self, pkg, exclusion_path):
+        """
+        Given a package name and exclusion path within that package,
+        compute the full exclusion path.
+        """
+        parts = pkg.split('.') + [exclusion_path]
+        return os.path.join(self.install_dir, *parts)
+
+    @staticmethod
+    def _all_packages(pkg_name):
+        """
+        Yield the package and each of its parents, most specific first.
+
+        >>> list(install_lib._all_packages('foo.bar.baz'))
+        ['foo.bar.baz', 'foo.bar', 'foo']
+        """
+        while pkg_name:
+            yield pkg_name
+            pkg_name, sep, child = pkg_name.rpartition('.')
+
+    def _get_SVEM_NSPs(self):
+        """
+        Get namespace packages (list) but only for
+        single_version_externally_managed installations and empty otherwise.
+        """
+        # TODO: is it necessary to short-circuit here? i.e. what's the cost
+        # if get_finalized_command is called even when namespace_packages is
+        # False?
+        if not self.distribution.namespace_packages:
+            return []
+
+        install_cmd = self.get_finalized_command('install')
+        svem = install_cmd.single_version_externally_managed
+
+        return self.distribution.namespace_packages if svem else []
+
+    @staticmethod
+    def _gen_exclusion_paths():
+        """
+        Generate file paths to be excluded for namespace packages (bytecode
+        cache files).
+        """
+        # always exclude the package module itself
+        yield '__init__.py'
+
+        yield '__init__.pyc'
+        yield '__init__.pyo'
+
+        # Interpreters without imp.get_tag have no __pycache__ tag scheme,
+        # so there is nothing more to exclude there.
+        if not hasattr(imp, 'get_tag'):
+            return
+
+        base = os.path.join('__pycache__', '__init__.' + imp.get_tag())
+        yield base + '.pyc'
+        yield base + '.pyo'
+        yield base + '.opt-1.pyc'
+        yield base + '.opt-2.pyc'
+
+    def copy_tree(
+            self, infile, outfile,
+            preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
+    ):
+        """Copy the build tree, skipping excluded namespace-package files."""
+        assert preserve_mode and preserve_times and not preserve_symlinks
+        exclude = self.get_exclusions()
+
+        if not exclude:
+            # Nothing to filter; defer to the stock distutils behavior.
+            return orig.install_lib.copy_tree(self, infile, outfile)
+
+        # Exclude namespace package __init__.py* files from the output
+
+        from setuptools.archive_util import unpack_directory
+        from distutils import log
+
+        outfiles = []
+
+        def pf(src, dst):
+            # Filter callback: excluded destinations are skipped (False);
+            # everything else is recorded as an output and copied to dst.
+            if dst in exclude:
+                log.warn("Skipping installation of %s (namespace package)",
+                         dst)
+                return False
+
+            log.info("copying %s -> %s", src, os.path.dirname(dst))
+            outfiles.append(dst)
+            return dst
+
+        unpack_directory(infile, outfile, pf)
+        return outfiles
+
+    def get_outputs(self):
+        """Return installed files minus any excluded namespace stubs."""
+        outputs = orig.install_lib.get_outputs(self)
+        exclude = self.get_exclusions()
+        if exclude:
+            return [f for f in outputs if f not in exclude]
+        return outputs
diff --git a/setuptools/command/install_scripts.py b/setuptools/command/install_scripts.py
new file mode 100755
index 0000000..1623427
--- /dev/null
+++ b/setuptools/command/install_scripts.py
@@ -0,0 +1,65 @@
+from distutils import log
+import distutils.command.install_scripts as orig
+import os
+import sys
+
+from pkg_resources import Distribution, PathMetadata, ensure_directory
+
+
+class install_scripts(orig.install_scripts):
+    """Do normal script install, plus any egg_info wrapper scripts"""
+
+    def initialize_options(self):
+        orig.install_scripts.initialize_options(self)
+        self.no_ep = False
+
+    def run(self):
+        import setuptools.command.easy_install as ei
+
+        self.run_command("egg_info")
+        if self.distribution.scripts:
+            orig.install_scripts.run(self)  # run first to set up self.outfiles
+        else:
+            self.outfiles = []
+        if self.no_ep:
+            # don't install entry point scripts into .egg file!
+            return
+
+        ei_cmd = self.get_finalized_command("egg_info")
+        dist = Distribution(
+            ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
+            ei_cmd.egg_name, ei_cmd.egg_version,
+        )
+        bs_cmd = self.get_finalized_command('build_scripts')
+        exec_param = getattr(bs_cmd, 'executable', None)
+        bw_cmd = self.get_finalized_command("bdist_wininst")
+        is_wininst = getattr(bw_cmd, '_is_running', False)
+        writer = ei.ScriptWriter
+        if is_wininst:
+            exec_param = "python.exe"
+            writer = ei.WindowsScriptWriter
+        if exec_param == sys.executable:
+            # In case the path to the Python executable contains a space, wrap
+            # it so it's not split up.
+            exec_param = [exec_param]
+        # resolve the writer to the environment
+        writer = writer.best()
+        cmd = writer.command_spec_class.best().from_param(exec_param)
+        for args in writer.get_args(dist, cmd.as_header()):
+            self.write_script(*args)
+
+    def write_script(self, script_name, contents, mode="t", *ignored):
+        """Write an executable file to the scripts directory"""
+        from setuptools.command.easy_install import chmod, current_umask
+
+        log.info("Installing %s script to %s", script_name, self.install_dir)
+        target = os.path.join(self.install_dir, script_name)
+        self.outfiles.append(target)
+
+        mask = current_umask()
+        if not self.dry_run:
+            ensure_directory(target)
+            f = open(target, "w" + mode)
+            f.write(contents)
+            f.close()
+            chmod(target, 0o777 - mask)
diff --git a/setuptools/command/launcher manifest.xml b/setuptools/command/launcher manifest.xml
new file mode 100644
index 0000000..5972a96
--- /dev/null
+++ b/setuptools/command/launcher manifest.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+    <assemblyIdentity version="1.0.0.0"
+                      processorArchitecture="X86"
+                      name="%(name)s"
+                      type="win32"/>
+    <!-- Identify the application security requirements. -->
+    <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+        <security>
+            <requestedPrivileges>
+                <requestedExecutionLevel level="asInvoker" uiAccess="false"/>
+            </requestedPrivileges>
+        </security>
+    </trustInfo>
+</assembly>
diff --git a/setuptools/command/py36compat.py b/setuptools/command/py36compat.py
new file mode 100644
index 0000000..61063e7
--- /dev/null
+++ b/setuptools/command/py36compat.py
@@ -0,0 +1,136 @@
+import os
+from glob import glob
+from distutils.util import convert_path
+from distutils.command import sdist
+
+from setuptools.extern.six.moves import filter
+
+
+class sdist_add_defaults:
+    """
+    Mix-in providing forward-compatibility for functionality as found in
+    distutils on Python 3.7.
+
+    Do not edit the code in this class except to update functionality
+    as implemented in distutils. Instead, override in the subclass.
+    """
+
+    def add_defaults(self):
+        """Add all the default files to self.filelist:
+          - README or README.txt
+          - setup.py
+          - test/test*.py
+          - all pure Python modules mentioned in setup script
+          - all files pointed by package_data (build_py)
+          - all files defined in data_files.
+          - all files defined as scripts.
+          - all C sources listed as part of extensions or C libraries
+            in the setup script (doesn't catch C headers!)
+        Warns if (README or README.txt) or setup.py are missing; everything
+        else is optional.
+        """
+        self._add_defaults_standards()
+        self._add_defaults_optional()
+        self._add_defaults_python()
+        self._add_defaults_data_files()
+        self._add_defaults_ext()
+        self._add_defaults_c_libs()
+        self._add_defaults_scripts()
+
+    @staticmethod
+    def _cs_path_exists(fspath):
+        """
+        Case-sensitive path existence check
+
+        >>> sdist_add_defaults._cs_path_exists(__file__)
+        True
+        >>> sdist_add_defaults._cs_path_exists(__file__.upper())
+        False
+        """
+        if not os.path.exists(fspath):
+            return False
+        # make absolute so we always have a directory
+        abspath = os.path.abspath(fspath)
+        directory, filename = os.path.split(abspath)
+        # comparing against the directory listing makes the check
+        # case-sensitive even on case-insensitive filesystems
+        return filename in os.listdir(directory)
+
+    def _add_defaults_standards(self):
+        # self.READMES is provided by the consuming subclass.
+        standards = [self.READMES, self.distribution.script_name]
+        for fn in standards:
+            if isinstance(fn, tuple):
+                # a tuple lists acceptable alternatives; take the first
+                # that exists (case-sensitively)
+                alts = fn
+                got_it = False
+                for fn in alts:
+                    if self._cs_path_exists(fn):
+                        got_it = True
+                        self.filelist.append(fn)
+                        break
+
+                if not got_it:
+                    self.warn("standard file not found: should have one of " +
+                              ', '.join(alts))
+            else:
+                if self._cs_path_exists(fn):
+                    self.filelist.append(fn)
+                else:
+                    self.warn("standard file '%s' not found" % fn)
+
+    def _add_defaults_optional(self):
+        optional = ['test/test*.py', 'setup.cfg']
+        for pattern in optional:
+            files = filter(os.path.isfile, glob(pattern))
+            self.filelist.extend(files)
+
+    def _add_defaults_python(self):
+        # build_py is used to get:
+        #  - python modules
+        #  - files defined in package_data
+        build_py = self.get_finalized_command('build_py')
+
+        # getting python files
+        if self.distribution.has_pure_modules():
+            self.filelist.extend(build_py.get_source_files())
+
+        # getting package_data files
+        # (computed in build_py.data_files by build_py.finalize_options)
+        for pkg, src_dir, build_dir, filenames in build_py.data_files:
+            for filename in filenames:
+                self.filelist.append(os.path.join(src_dir, filename))
+
+    def _add_defaults_data_files(self):
+        # getting distribution.data_files
+        if self.distribution.has_data_files():
+            for item in self.distribution.data_files:
+                if isinstance(item, str):
+                    # plain file
+                    item = convert_path(item)
+                    if os.path.isfile(item):
+                        self.filelist.append(item)
+                else:
+                    # a (dirname, filenames) tuple
+                    dirname, filenames = item
+                    for f in filenames:
+                        f = convert_path(f)
+                        if os.path.isfile(f):
+                            self.filelist.append(f)
+
+    def _add_defaults_ext(self):
+        if self.distribution.has_ext_modules():
+            build_ext = self.get_finalized_command('build_ext')
+            self.filelist.extend(build_ext.get_source_files())
+
+    def _add_defaults_c_libs(self):
+        if self.distribution.has_c_libraries():
+            build_clib = self.get_finalized_command('build_clib')
+            self.filelist.extend(build_clib.get_source_files())
+
+    def _add_defaults_scripts(self):
+        if self.distribution.has_scripts():
+            build_scripts = self.get_finalized_command('build_scripts')
+            self.filelist.extend(build_scripts.get_source_files())
+
+
+if hasattr(sdist.sdist, '_add_defaults_standards'):
+    # disable the functionality already available upstream
+    # (when the running distutils already provides the _add_defaults_*
+    # hooks, replace the mix-in with an empty class so upstream wins)
+    class sdist_add_defaults:
+        pass
diff --git a/setuptools/command/register.py b/setuptools/command/register.py
new file mode 100755
index 0000000..8d6336a
--- /dev/null
+++ b/setuptools/command/register.py
@@ -0,0 +1,10 @@
+import distutils.command.register as orig
+
+
+class register(orig.register):
+    __doc__ = orig.register.__doc__
+
+    def run(self):
+        # Make sure that we are using valid current name/version info
+        # before delegating to the stock distutils register command.
+        self.run_command('egg_info')
+        orig.register.run(self)
diff --git a/setuptools/command/rotate.py b/setuptools/command/rotate.py
new file mode 100755
index 0000000..b89353f
--- /dev/null
+++ b/setuptools/command/rotate.py
@@ -0,0 +1,66 @@
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import os
+import shutil
+
+from setuptools.extern import six
+
+from setuptools import Command
+
+
+class rotate(Command):
+    """Delete older distributions"""
+
+    description = "delete older distributions, keeping N newest files"
+    user_options = [
+        ('match=', 'm', "patterns to match (required)"),
+        ('dist-dir=', 'd', "directory where the distributions are"),
+        ('keep=', 'k', "number of matching distributions to keep"),
+    ]
+
+    boolean_options = []
+
+    def initialize_options(self):
+        self.match = None
+        self.dist_dir = None
+        self.keep = None
+
+    def finalize_options(self):
+        if self.match is None:
+            raise DistutilsOptionError(
+                "Must specify one or more (comma-separated) match patterns "
+                "(e.g. '.zip' or '.egg')"
+            )
+        if self.keep is None:
+            raise DistutilsOptionError("Must specify number of files to keep")
+        try:
+            self.keep = int(self.keep)
+        except ValueError:
+            raise DistutilsOptionError("--keep must be an integer")
+        if isinstance(self.match, six.string_types):
+            self.match = [
+                convert_path(p.strip()) for p in self.match.split(',')
+            ]
+        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+
+    def run(self):
+        self.run_command("egg_info")
+        from glob import glob
+
+        for pattern in self.match:
+            pattern = self.distribution.get_name() + '*' + pattern
+            files = glob(os.path.join(self.dist_dir, pattern))
+            files = [(os.path.getmtime(f), f) for f in files]
+            files.sort()
+            files.reverse()
+
+            log.info("%d file(s) matching %s", len(files), pattern)
+            files = files[self.keep:]
+            for (t, f) in files:
+                log.info("Deleting %s", f)
+                if not self.dry_run:
+                    if os.path.isdir(f):
+                        shutil.rmtree(f)
+                    else:
+                        os.unlink(f)
diff --git a/setuptools/command/saveopts.py b/setuptools/command/saveopts.py
new file mode 100755
index 0000000..611cec5
--- /dev/null
+++ b/setuptools/command/saveopts.py
@@ -0,0 +1,22 @@
+from setuptools.command.setopt import edit_config, option_base
+
+
+class saveopts(option_base):
+    """Save command-line options to a file"""
+
+    description = "save supplied options to setup.cfg or other config file"
+
+    def run(self):
+        dist = self.distribution
+        settings = {}
+
+        for cmd in dist.command_options:
+
+            if cmd == 'saveopts':
+                continue  # don't save our own options!
+
+            for opt, (src, val) in dist.get_option_dict(cmd).items():
+                if src == "command line":
+                    settings.setdefault(cmd, {})[opt] = val
+
+        edit_config(self.filename, settings, self.dry_run)
diff --git a/setuptools/command/sdist.py b/setuptools/command/sdist.py
new file mode 100755
index 0000000..bcfae4d
--- /dev/null
+++ b/setuptools/command/sdist.py
@@ -0,0 +1,200 @@
+from distutils import log
+import distutils.command.sdist as orig
+import os
+import sys
+import io
+import contextlib
+
+from setuptools.extern import six
+
+from .py36compat import sdist_add_defaults
+
+import pkg_resources
+
+_default_revctrl = list
+
+
+def walk_revctrl(dirname=''):
+    """Find all files under revision control"""
+    for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
+        for item in ep.load()(dirname):
+            yield item
+
+
+class sdist(sdist_add_defaults, orig.sdist):
+    """Smart sdist that finds anything supported by revision control"""
+
+    user_options = [
+        ('formats=', None,
+         "formats for source distribution (comma-separated list)"),
+        ('keep-temp', 'k',
+         "keep the distribution tree around after creating " +
+         "archive file(s)"),
+        ('dist-dir=', 'd',
+         "directory to put the source distribution archive(s) in "
+         "[default: dist]"),
+    ]
+
+    negative_opt = {}
+
+    README_EXTENSIONS = ['', '.rst', '.txt', '.md']
+    READMES = tuple('README{0}'.format(ext) for ext in README_EXTENSIONS)
+
+    def run(self):
+        """Build the file list via egg_info, then create the archive(s)."""
+        self.run_command('egg_info')
+        ei_cmd = self.get_finalized_command('egg_info')
+        # Reuse egg_info's file list, and ship its generated SOURCES.txt.
+        self.filelist = ei_cmd.filelist
+        self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
+        self.check_readme()
+
+        # Run sub commands
+        for cmd_name in self.get_sub_commands():
+            self.run_command(cmd_name)
+
+        self.make_distribution()
+
+        # Record the produced archives on the distribution object so
+        # later commands can find them.
+        dist_files = getattr(self.distribution, 'dist_files', [])
+        for file in self.archive_files:
+            data = ('sdist', '', file)
+            if data not in dist_files:
+                dist_files.append(data)
+
+    def initialize_options(self):
+        orig.sdist.initialize_options(self)
+
+        self._default_to_gztar()
+
+    def _default_to_gztar(self):
+        # only needed on Python prior to 3.6.
+        if sys.version_info >= (3, 6, 0, 'beta', 1):
+            return
+        self.formats = ['gztar']
+
+    def make_distribution(self):
+        """
+        Workaround for #516
+        """
+        # Temporarily hide os.link while distutils builds the release tree.
+        with self._remove_os_link():
+            orig.sdist.make_distribution(self)
+
+    @staticmethod
+    @contextlib.contextmanager
+    def _remove_os_link():
+        """
+        In a context, remove and restore os.link if it exists
+        """
+
+        # sentinel distinguishing "os.link absent" from any real value
+        class NoValue:
+            pass
+
+        orig_val = getattr(os, 'link', NoValue)
+        try:
+            del os.link
+        except Exception:
+            pass
+        try:
+            yield
+        finally:
+            if orig_val is not NoValue:
+                setattr(os, 'link', orig_val)
+
+    def __read_template_hack(self):
+        # This grody hack closes the template file (MANIFEST.in) if an
+        #  exception occurs during read_template.
+        # Doing so prevents an error when easy_install attempts to delete the
+        #  file.
+        try:
+            orig.sdist.read_template(self)
+        except Exception:
+            # reach into read_template's frame to grab its open file object
+            _, _, tb = sys.exc_info()
+            tb.tb_next.tb_frame.f_locals['template'].close()
+            raise
+
+    # Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle
+    #  has been fixed, so only override the method if we're using an earlier
+    #  Python.
+    has_leaky_handle = (
+        sys.version_info < (2, 7, 2)
+        or (3, 0) <= sys.version_info < (3, 1, 4)
+        or (3, 2) <= sys.version_info < (3, 2, 1)
+    )
+    if has_leaky_handle:
+        read_template = __read_template_hack
+
+    def _add_defaults_python(self):
+        """getting python files"""
+        if self.distribution.has_pure_modules():
+            build_py = self.get_finalized_command('build_py')
+            self.filelist.extend(build_py.get_source_files())
+            # This functionality is incompatible with include_package_data, and
+            # will in fact create an infinite recursion if include_package_data
+            # is True.  Use of include_package_data will imply that
+            # distutils-style automatic handling of package_data is disabled
+            if not self.distribution.include_package_data:
+                for _, src_dir, _, filenames in build_py.data_files:
+                    self.filelist.extend([os.path.join(src_dir, filename)
+                                          for filename in filenames])
+
+    def _add_defaults_data_files(self):
+        try:
+            if six.PY2:
+                # Python 2: call the mix-in method unbound-style.
+                sdist_add_defaults._add_defaults_data_files(self)
+            else:
+                super()._add_defaults_data_files()
+        except TypeError:
+            log.warn("data_files contains unexpected objects")
+
+    def check_readme(self):
+        """Warn when none of the recognized README files exists."""
+        for f in self.READMES:
+            if os.path.exists(f):
+                return
+        else:
+            self.warn(
+                "standard file not found: should have one of " +
+                ', '.join(self.READMES)
+            )
+
+    def make_release_tree(self, base_dir, files):
+        orig.sdist.make_release_tree(self, base_dir, files)
+
+        # Save any egg_info command line options used to create this sdist
+        dest = os.path.join(base_dir, 'setup.cfg')
+        if hasattr(os, 'link') and os.path.exists(dest):
+            # unlink and re-copy, since it might be hard-linked, and
+            # we don't want to change the source version
+            os.unlink(dest)
+            self.copy_file('setup.cfg', dest)
+
+        self.get_finalized_command('egg_info').save_version_info(dest)
+
+    def _manifest_is_not_generated(self):
+        # check for special comment used in 2.7.1 and higher
+        if not os.path.isfile(self.manifest):
+            return False
+
+        with io.open(self.manifest, 'rb') as fp:
+            first_line = fp.readline()
+        return (first_line !=
+                '# file GENERATED by distutils, do NOT edit\n'.encode())
+
+    def read_manifest(self):
+        """Read the manifest file (named by 'self.manifest') and use it to
+        fill in 'self.filelist', the list of files to include in the source
+        distribution.
+        """
+        log.info("reading manifest file '%s'", self.manifest)
+        manifest = open(self.manifest, 'rb')
+        for line in manifest:
+            # The manifest must contain UTF-8. See #303.
+            if six.PY3:
+                try:
+                    line = line.decode('UTF-8')
+                except UnicodeDecodeError:
+                    log.warn("%r not UTF-8 decodable -- skipping" % line)
+                    continue
+            # ignore comments and blank lines
+            line = line.strip()
+            if line.startswith('#') or not line:
+                continue
+            self.filelist.append(line)
+        manifest.close()
diff --git a/setuptools/command/setopt.py b/setuptools/command/setopt.py
new file mode 100755
index 0000000..7e57cc0
--- /dev/null
+++ b/setuptools/command/setopt.py
@@ -0,0 +1,149 @@
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import distutils
+import os
+
+from setuptools.extern.six.moves import configparser
+
+from setuptools import Command
+
+__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
+
+
+def config_file(kind="local"):
+    """Get the filename of the distutils, local, global, or per-user config
+
+    `kind` must be one of "local", "global", or "user"
+    """
+    if kind == 'local':
+        return 'setup.cfg'
+    if kind == 'global':
+        return os.path.join(
+            os.path.dirname(distutils.__file__), 'distutils.cfg'
+        )
+    if kind == 'user':
+        dot = os.name == 'posix' and '.' or ''
+        return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
+    raise ValueError(
+        "config_file() type must be 'local', 'global', or 'user'", kind
+    )
+
+
+def edit_config(filename, settings, dry_run=False):
+    """Edit a configuration file to include `settings`
+
+    `settings` is a dictionary of dictionaries or ``None`` values, keyed by
+    command/section name.  A ``None`` value means to delete the entire section,
+    while a dictionary lists settings to be changed or deleted in that section.
+    A setting of ``None`` means to delete that setting.
+    """
+    log.debug("Reading configuration from %s", filename)
+    opts = configparser.RawConfigParser()
+    # read() tolerates a missing file, so this also handles creation.
+    opts.read([filename])
+    for section, options in settings.items():
+        if options is None:
+            log.info("Deleting section [%s] from %s", section, filename)
+            opts.remove_section(section)
+        else:
+            if not opts.has_section(section):
+                log.debug("Adding new section [%s] to %s", section, filename)
+                opts.add_section(section)
+            for option, value in options.items():
+                if value is None:
+                    log.debug(
+                        "Deleting %s.%s from %s",
+                        section, option, filename
+                    )
+                    opts.remove_option(section, option)
+                    # drop the section entirely once its last option is gone
+                    if not opts.options(section):
+                        log.info("Deleting empty [%s] section from %s",
+                                 section, filename)
+                        opts.remove_section(section)
+                else:
+                    log.debug(
+                        "Setting %s.%s to %r in %s",
+                        section, option, value, filename
+                    )
+                    opts.set(section, option, value)
+
+    log.info("Writing %s", filename)
+    if not dry_run:
+        with open(filename, 'w') as f:
+            opts.write(f)
+
+
+class option_base(Command):
+    """Abstract base class for commands that mess with config files"""
+
+    user_options = [
+        ('global-config', 'g',
+         "save options to the site-wide distutils.cfg file"),
+        ('user-config', 'u',
+         "save options to the current user's pydistutils.cfg file"),
+        ('filename=', 'f',
+         "configuration file to use (default=setup.cfg)"),
+    ]
+
+    boolean_options = [
+        'global-config', 'user-config',
+    ]
+
+    def initialize_options(self):
+        self.global_config = None
+        self.user_config = None
+        self.filename = None
+
+    def finalize_options(self):
+        """Resolve the mutually-exclusive options to a single filename."""
+        filenames = []
+        if self.global_config:
+            filenames.append(config_file('global'))
+        if self.user_config:
+            filenames.append(config_file('user'))
+        if self.filename is not None:
+            filenames.append(self.filename)
+        if not filenames:
+            # default to the project-local setup.cfg
+            filenames.append(config_file('local'))
+        if len(filenames) > 1:
+            raise DistutilsOptionError(
+                "Must specify only one configuration file option",
+                filenames
+            )
+        self.filename, = filenames
+
+
+class setopt(option_base):
+    """Save command-line options to a file"""
+
+    description = "set an option in setup.cfg or another config file"
+
+    user_options = [
+        ('command=', 'c', 'command to set an option for'),
+        ('option=', 'o', 'option to set'),
+        ('set-value=', 's', 'value of the option'),
+        ('remove', 'r', 'remove (unset) the value'),
+    ] + option_base.user_options
+
+    boolean_options = option_base.boolean_options + ['remove']
+
+    def initialize_options(self):
+        option_base.initialize_options(self)
+        self.command = None
+        self.option = None
+        self.set_value = None
+        self.remove = None
+
+    def finalize_options(self):
+        option_base.finalize_options(self)
+        if self.command is None or self.option is None:
+            raise DistutilsOptionError("Must specify --command *and* --option")
+        if self.set_value is None and not self.remove:
+            raise DistutilsOptionError("Must specify --set-value or --remove")
+
+    def run(self):
+        # Option names use dashes on the command line but underscores in
+        # config files; with --remove, set_value is None and edit_config
+        # deletes the option.
+        edit_config(
+            self.filename, {
+                self.command: {self.option.replace('-', '_'): self.set_value}
+            },
+            self.dry_run
+        )
diff --git a/setuptools/command/test.py b/setuptools/command/test.py
new file mode 100644
index 0000000..51aee1f
--- /dev/null
+++ b/setuptools/command/test.py
@@ -0,0 +1,268 @@
+import os
+import operator
+import sys
+import contextlib
+import itertools
+import unittest
+from distutils.errors import DistutilsError, DistutilsOptionError
+from distutils import log
+from unittest import TestLoader
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import map, filter
+
+from pkg_resources import (resource_listdir, resource_exists, normalize_path,
+                           working_set, _namespace_packages, evaluate_marker,
+                           add_activation_listener, require, EntryPoint)
+from setuptools import Command
+
+
class ScanningLoader(TestLoader):
    """A ``unittest`` loader that also scans packages for test modules."""

    def __init__(self):
        TestLoader.__init__(self)
        # Modules already scanned; guards against loading a module twice
        # (and against infinite recursion through circular packages).
        self._visited = set()

    def loadTestsFromModule(self, module, pattern=None):
        """Return a suite of all tests cases contained in the given module

        If the module is a package, load tests from all the modules in it.
        If the module has an ``additional_tests`` function, call it and add
        the return value to the tests.
        """
        if module in self._visited:
            return None
        self._visited.add(module)

        collected = [TestLoader.loadTestsFromModule(self, module)]

        if hasattr(module, "additional_tests"):
            collected.append(module.additional_tests())

        if hasattr(module, '__path__'):
            # Package: recurse into every submodule and subpackage.
            for entry in resource_listdir(module.__name__, ''):
                if entry.endswith('.py') and entry != '__init__.py':
                    submodule_name = module.__name__ + '.' + entry[:-3]
                elif resource_exists(module.__name__, entry + '/__init__.py'):
                    submodule_name = module.__name__ + '.' + entry
                else:
                    continue
                collected.append(self.loadTestsFromName(submodule_name))

        # Avoid wrapping a lone result in a redundant nested suite.
        if len(collected) == 1:
            return collected[0]
        return self.suiteClass(collected)
+
+
+# adapted from jaraco.classes.properties:NonDataProperty
class NonDataProperty(object):
    """A read-only, non-data descriptor: ``property`` minus ``__set__``.

    Because it defines no ``__set__``, an instance attribute of the same
    name shadows it once assigned.
    """

    def __init__(self, fget):
        self.fget = fget

    def __get__(self, obj, objtype=None):
        # Accessed on the class itself -> hand back the descriptor.
        return self if obj is None else self.fget(obj)
+
+
class test(Command):
    """Command to run unit tests after in-place build"""

    description = "run unit tests after in-place build"

    user_options = [
        ('test-module=', 'm', "Run 'test_suite' in specified module"),
        ('test-suite=', 's',
         "Run single test, case or suite (e.g. 'module.test_suite')"),
        ('test-runner=', 'r', "Test runner to use"),
    ]

    def initialize_options(self):
        # Everything defaults to None; finalize_options() fills in fallbacks.
        self.test_suite = None
        self.test_module = None
        self.test_loader = None
        self.test_runner = None

    def finalize_options(self):
        """Resolve suite/loader/runner, rejecting conflicting options."""

        if self.test_suite and self.test_module:
            msg = "You may specify a module or a suite, but not both"
            raise DistutilsOptionError(msg)

        if self.test_suite is None:
            if self.test_module is None:
                # Neither given: defer to the distribution's declared suite.
                self.test_suite = self.distribution.test_suite
            else:
                self.test_suite = self.test_module + ".test_suite"

        # Loader resolution order: explicit option, distribution attribute,
        # then the package-scanning loader defined in this module.
        if self.test_loader is None:
            self.test_loader = getattr(self.distribution, 'test_loader', None)
        if self.test_loader is None:
            self.test_loader = "setuptools.command.test:ScanningLoader"
        if self.test_runner is None:
            self.test_runner = getattr(self.distribution, 'test_runner', None)

    @NonDataProperty
    def test_args(self):
        # NonDataProperty: instances may shadow this with a plain attribute.
        return list(self._test_args())

    def _test_args(self):
        # Yield the argv fragments ultimately passed to unittest.main().
        if not self.test_suite and sys.version_info >= (2, 7):
            yield 'discover'
        if self.verbose:
            yield '--verbose'
        if self.test_suite:
            yield self.test_suite

    def with_project_on_sys_path(self, func):
        """
        Backward compatibility for project_on_sys_path context.
        """
        with self.project_on_sys_path():
            func()

    @contextlib.contextmanager
    def project_on_sys_path(self, include_dists=[]):
        """Context manager: build the project and make it importable.

        The built project is put at the head of ``sys.path`` and
        ``PYTHONPATH``; ``sys.path`` and ``sys.modules`` are restored on
        exit.

        NOTE(review): ``include_dists`` appears unused in this body --
        presumably kept for API compatibility; confirm before removing.
        """
        with_2to3 = six.PY3 and getattr(self.distribution, 'use_2to3', False)

        if with_2to3:
            # If we run 2to3 we can not do this inplace:

            # Ensure metadata is up-to-date
            self.reinitialize_command('build_py', inplace=0)
            self.run_command('build_py')
            bpy_cmd = self.get_finalized_command("build_py")
            build_path = normalize_path(bpy_cmd.build_lib)

            # Build extensions
            self.reinitialize_command('egg_info', egg_base=build_path)
            self.run_command('egg_info')

            self.reinitialize_command('build_ext', inplace=0)
            self.run_command('build_ext')
        else:
            # Without 2to3 inplace works fine:
            self.run_command('egg_info')

            # Build extensions in-place
            self.reinitialize_command('build_ext', inplace=1)
            self.run_command('build_ext')

        ei_cmd = self.get_finalized_command("egg_info")

        # Snapshot interpreter state so it can be restored afterwards.
        old_path = sys.path[:]
        old_modules = sys.modules.copy()

        try:
            project_path = normalize_path(ei_cmd.egg_base)
            sys.path.insert(0, project_path)
            # Reset the global working set so it picks up the new sys.path,
            # then activate every distribution as it is added.
            working_set.__init__()
            add_activation_listener(lambda dist: dist.activate())
            require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
            with self.paths_on_pythonpath([project_path]):
                yield
        finally:
            sys.path[:] = old_path
            sys.modules.clear()
            sys.modules.update(old_modules)
            working_set.__init__()

    @staticmethod
    @contextlib.contextmanager
    def paths_on_pythonpath(paths):
        """
        Add the indicated paths to the head of the PYTHONPATH environment
        variable so that subprocesses will also see the packages at
        these paths.

        Do this in a context that restores the value on exit.
        """
        # Sentinel distinguishes "unset" from "set to empty string".
        nothing = object()
        orig_pythonpath = os.environ.get('PYTHONPATH', nothing)
        current_pythonpath = os.environ.get('PYTHONPATH', '')
        try:
            prefix = os.pathsep.join(paths)
            to_join = filter(None, [prefix, current_pythonpath])
            new_path = os.pathsep.join(to_join)
            if new_path:
                os.environ['PYTHONPATH'] = new_path
            yield
        finally:
            if orig_pythonpath is nothing:
                os.environ.pop('PYTHONPATH', None)
            else:
                os.environ['PYTHONPATH'] = orig_pythonpath

    @staticmethod
    def install_dists(dist):
        """
        Install the requirements indicated by self.distribution and
        return an iterable of the dists that were built.
        """
        ir_d = dist.fetch_build_eggs(dist.install_requires)
        tr_d = dist.fetch_build_eggs(dist.tests_require or [])
        # Extras named ':marker' carry environment markers; fetch their
        # requirements only when the marker evaluates true.
        er_d = dist.fetch_build_eggs(
            v for k, v in dist.extras_require.items()
            if k.startswith(':') and evaluate_marker(k[1:])
        )
        return itertools.chain(ir_d, tr_d, er_d)

    def run(self):
        """Install dependencies, build the project, and run the tests."""
        installed_dists = self.install_dists(self.distribution)

        cmd = ' '.join(self._argv)
        if self.dry_run:
            self.announce('skipping "%s" (dry run)' % cmd)
            return

        self.announce('running "%s"' % cmd)

        paths = map(operator.attrgetter('location'), installed_dists)
        with self.paths_on_pythonpath(paths):
            with self.project_on_sys_path():
                self.run_tests()

    def run_tests(self):
        """Invoke unittest.main(); raise DistutilsError on failure."""
        # Purge modules under test from sys.modules. The test loader will
        # re-import them from the build location. Required when 2to3 is used
        # with namespace packages.
        if six.PY3 and getattr(self.distribution, 'use_2to3', False):
            module = self.test_suite.split('.')[0]
            if module in _namespace_packages:
                del_modules = []
                if module in sys.modules:
                    del_modules.append(module)
                module += '.'
                for name in sys.modules:
                    if name.startswith(module):
                        del_modules.append(name)
                list(map(sys.modules.__delitem__, del_modules))

        # exit=False: failures are reported via the result object instead
        # of SystemExit, so we can raise a distutils error ourselves.
        test = unittest.main(
            None, None, self._argv,
            testLoader=self._resolve_as_ep(self.test_loader),
            testRunner=self._resolve_as_ep(self.test_runner),
            exit=False,
        )
        if not test.result.wasSuccessful():
            msg = 'Test failed: %s' % test.result
            self.announce(msg, log.ERROR)
            raise DistutilsError(msg)

    @property
    def _argv(self):
        # argv handed to unittest.main(); element 0 is the program name.
        return ['unittest'] + self.test_args

    @staticmethod
    def _resolve_as_ep(val):
        """
        Load the indicated attribute value, called, as if it were
        specified as an entry point.
        """
        if val is None:
            return
        parsed = EntryPoint.parse("x=" + val)
        return parsed.resolve()()
diff --git a/setuptools/command/upload.py b/setuptools/command/upload.py
new file mode 100644
index 0000000..a44173a
--- /dev/null
+++ b/setuptools/command/upload.py
@@ -0,0 +1,42 @@
+import getpass
+from distutils.command import upload as orig
+
+
class upload(orig.upload):
    """
    Override default upload behavior to obtain password
    in a variety of different ways.
    """

    def finalize_options(self):
        """Fill in username/password from the OS user, keyring, or a prompt."""
        orig.upload.finalize_options(self)
        # Default the username to the current OS user when not configured.
        if not self.username:
            self.username = getpass.getuser()
        # Try each password source in turn, stopping at the first success.
        if not self.password:
            self.password = self._load_password_from_keyring()
        if not self.password:
            self.password = self._prompt_for_password()

    def _load_password_from_keyring(self):
        """
        Attempt to load password from keyring. Suppress Exceptions.
        """
        try:
            keyring = __import__('keyring')
        except Exception:
            return None
        try:
            return keyring.get_password(self.repository, self.username)
        except Exception:
            return None

    def _prompt_for_password(self):
        """
        Prompt for a password on the tty. Suppress Exceptions.
        """
        try:
            return getpass.getpass()
        except (Exception, KeyboardInterrupt):
            return None
diff --git a/setuptools/command/upload_docs.py b/setuptools/command/upload_docs.py
new file mode 100644
index 0000000..07aa564
--- /dev/null
+++ b/setuptools/command/upload_docs.py
@@ -0,0 +1,206 @@
+# -*- coding: utf-8 -*-
+"""upload_docs
+
+Implements a Distutils 'upload_docs' subcommand (upload documentation to
+PyPI's pythonhosted.org).
+"""
+
+from base64 import standard_b64encode
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import os
+import socket
+import zipfile
+import tempfile
+import shutil
+import itertools
+import functools
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import http_client, urllib
+
+from pkg_resources import iter_entry_points
+from .upload import upload
+
+
def _encode(s):
    """Encode *s* as UTF-8 bytes.

    On Python 3 the ``surrogateescape`` handler is used so that lone
    surrogates round-trip; Python 2 falls back to strict encoding.
    """
    error_handler = 'surrogateescape' if six.PY3 else 'strict'
    return s.encode('utf-8', error_handler)
+
+
class upload_docs(upload):
    """Distutils command: zip the built documentation and upload it."""

    # override the default repository as upload_docs isn't
    # supported by Warehouse (and won't be).
    DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'

    description = 'Upload documentation to PyPI'

    user_options = [
        ('repository=', 'r',
         "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
        ('show-response', None,
         'display full response text from server'),
        ('upload-dir=', None, 'directory to upload'),
    ]
    boolean_options = upload.boolean_options

    def has_sphinx(self):
        # Truthy only when no explicit upload dir was given and a
        # 'build_sphinx' command is advertised via an entry point; falls
        # through to None otherwise. Used as a sub-command predicate below.
        if self.upload_dir is None:
            for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
                return True

    sub_commands = [('build_sphinx', has_sphinx)]

    def initialize_options(self):
        upload.initialize_options(self)
        self.upload_dir = None
        self.target_dir = None

    def finalize_options(self):
        """Derive target_dir from --upload-dir, Sphinx, or the build dir."""
        upload.finalize_options(self)
        if self.upload_dir is None:
            if self.has_sphinx():
                build_sphinx = self.get_finalized_command('build_sphinx')
                self.target_dir = build_sphinx.builder_target_dir
            else:
                build = self.get_finalized_command('build')
                self.target_dir = os.path.join(build.build_base, 'docs')
        else:
            self.ensure_dirname('upload_dir')
            self.target_dir = self.upload_dir
        if 'pypi.python.org' in self.repository:
            log.warn("Upload_docs command is deprecated. Use RTD instead.")
        self.announce('Using upload directory %s' % self.target_dir)

    def create_zipfile(self, filename):
        """Zip the contents of target_dir into *filename*.

        :raises DistutilsOptionError: when the upload directory contains
            no files at its top level.
        """
        zip_file = zipfile.ZipFile(filename, "w")
        try:
            self.mkpath(self.target_dir)  # just in case
            for root, dirs, files in os.walk(self.target_dir):
                if root == self.target_dir and not files:
                    tmpl = "no files found in upload directory '%s'"
                    raise DistutilsOptionError(tmpl % self.target_dir)
                for name in files:
                    full = os.path.join(root, name)
                    # Archive entries are stored relative to target_dir.
                    relative = root[len(self.target_dir):].lstrip(os.path.sep)
                    dest = os.path.join(relative, name)
                    zip_file.write(full, dest)
        finally:
            zip_file.close()

    def run(self):
        """Build docs via sub-commands, zip them, and upload the archive."""
        # Run sub commands
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)

        tmp_dir = tempfile.mkdtemp()
        name = self.distribution.metadata.get_name()
        zip_file = os.path.join(tmp_dir, "%s.zip" % name)
        try:
            self.create_zipfile(zip_file)
            self.upload_file(zip_file)
        finally:
            shutil.rmtree(tmp_dir)

    @staticmethod
    def _build_part(item, sep_boundary):
        """Yield the multipart byte chunks for one (key, values) form item."""
        key, values = item
        title = '\nContent-Disposition: form-data; name="%s"' % key
        # handle multiple entries for the same name
        if not isinstance(values, list):
            values = [values]
        for value in values:
            if isinstance(value, tuple):
                # (filename, content) pair: emit a file-upload part.
                title += '; filename="%s"' % value[0]
                value = value[1]
            else:
                value = _encode(value)
            yield sep_boundary
            yield _encode(title)
            yield b"\n\n"
            yield value
            if value and value[-1:] == b'\r':
                yield b'\n'  # write an extra newline (lurve Macs)

    @classmethod
    def _build_multipart(cls, data):
        """
        Build up the MIME payload for the POST data

        Returns a ``(body_bytes, content_type_header)`` pair.
        """
        boundary = b'--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
        sep_boundary = b'\n--' + boundary
        end_boundary = sep_boundary + b'--'
        end_items = end_boundary, b"\n",
        builder = functools.partial(
            cls._build_part,
            sep_boundary=sep_boundary,
        )
        part_groups = map(builder, data.items())
        parts = itertools.chain.from_iterable(part_groups)
        body_items = itertools.chain(parts, end_items)
        content_type = 'multipart/form-data; boundary=%s' % boundary.decode('ascii')
        return b''.join(body_items), content_type

    def upload_file(self, filename):
        """POST *filename* to the repository using HTTP basic auth."""
        with open(filename, 'rb') as f:
            content = f.read()
        meta = self.distribution.metadata
        data = {
            ':action': 'doc_upload',
            'name': meta.get_name(),
            'content': (os.path.basename(filename), content),
        }
        # set up the authentication
        credentials = _encode(self.username + ':' + self.password)
        credentials = standard_b64encode(credentials)
        if six.PY3:
            credentials = credentials.decode('ascii')
        auth = "Basic " + credentials

        body, ct = self._build_multipart(data)

        msg = "Submitting documentation to %s" % (self.repository)
        self.announce(msg, log.INFO)

        # build the Request
        # We can't use urllib2 since we need to send the Basic
        # auth right with the first request
        schema, netloc, url, params, query, fragments = \
            urllib.parse.urlparse(self.repository)
        assert not params and not query and not fragments
        if schema == 'http':
            conn = http_client.HTTPConnection(netloc)
        elif schema == 'https':
            conn = http_client.HTTPSConnection(netloc)
        else:
            raise AssertionError("unsupported schema " + schema)

        data = ''
        try:
            conn.connect()
            conn.putrequest("POST", url)
            content_type = ct
            conn.putheader('Content-type', content_type)
            conn.putheader('Content-length', str(len(body)))
            conn.putheader('Authorization', auth)
            conn.endheaders()
            conn.send(body)
        except socket.error as e:
            # Network failure: report and bail without raising.
            self.announce(str(e), log.ERROR)
            return

        r = conn.getresponse()
        if r.status == 200:
            msg = 'Server response (%s): %s' % (r.status, r.reason)
            self.announce(msg, log.INFO)
        elif r.status == 301:
            # Redirect after a successful upload; point the user at the docs.
            location = r.getheader('Location')
            if location is None:
                location = 'https://pythonhosted.org/%s/' % meta.get_name()
            msg = 'Upload successful. Visit %s' % location
            self.announce(msg, log.INFO)
        else:
            msg = 'Upload failed (%s): %s' % (r.status, r.reason)
            self.announce(msg, log.ERROR)
        if self.show_response:
            print('-' * 75, r.read(), '-' * 75)
diff --git a/setuptools/config.py b/setuptools/config.py
new file mode 100644
index 0000000..8eddcae
--- /dev/null
+++ b/setuptools/config.py
@@ -0,0 +1,556 @@
+from __future__ import absolute_import, unicode_literals
+import io
+import os
+import sys
+from collections import defaultdict
+from functools import partial
+from importlib import import_module
+
+from distutils.errors import DistutilsOptionError, DistutilsFileError
+from setuptools.extern.six import string_types
+
+
def read_configuration(
        filepath, find_others=False, ignore_option_errors=False):
    """Read given configuration file and returns options from it as a dict.

    :param str|unicode filepath: Path to configuration file
        to get options from.

    :param bool find_others: Whether to search for other configuration files
        which could be on in various places.

    :param bool ignore_option_errors: Whether to silently ignore
        options, values of which could not be resolved (e.g. due to exceptions
        in directives such as file:, attr:, etc.).
        If False exceptions are propagated as expected.

    :rtype: dict
    """
    # Imported lazily to avoid a circular import with setuptools.dist.
    from setuptools.dist import Distribution, _Distribution

    filepath = os.path.abspath(filepath)
    if not os.path.isfile(filepath):
        raise DistutilsFileError(
            'Configuration file %s does not exist.' % filepath)

    # Relative paths inside the config (file:, packages.find where=) are
    # resolved against the config file's own directory.
    previous_cwd = os.getcwd()
    os.chdir(os.path.dirname(filepath))

    try:
        dist = Distribution()

        filenames = dist.find_config_files() if find_others else []
        if filepath not in filenames:
            filenames.append(filepath)

        _Distribution.parse_config_files(dist, filenames=filenames)

        handlers = parse_configuration(
            dist, dist.command_options,
            ignore_option_errors=ignore_option_errors)
    finally:
        os.chdir(previous_cwd)

    return configuration_to_dict(handlers)
+
+
def configuration_to_dict(handlers):
    """Returns configuration data gathered by given handlers as a dict.

    :param list[ConfigHandler] handlers: Handlers list,
        usually from parse_configuration()

    :rtype: dict
    """
    config_dict = defaultdict(dict)

    for handler in handlers:
        for option in handler.set_options:
            # Prefer a dedicated ``get_<option>`` accessor on the target
            # object; otherwise read the attribute directly.
            getter = getattr(handler.target_obj, 'get_%s' % option, None)
            if getter is None:
                value = getattr(handler.target_obj, option)
            else:
                value = getter()
            config_dict[handler.section_prefix][option] = value

    return config_dict
+
+
def parse_configuration(
        distribution, command_options, ignore_option_errors=False):
    """Performs additional parsing of configuration options
    for a distribution.

    Returns a list of used option handlers.

    :param Distribution distribution:
    :param dict command_options:
    :param bool ignore_option_errors: Whether to silently ignore
        options, values of which could not be resolved (e.g. due to exceptions
        in directives such as file:, attr:, etc.).
        If False exceptions are propagated as expected.
    :rtype: list
    """
    # [metadata] first, then [options]; each handler filters its own
    # sections out of command_options.
    handlers = (
        ConfigMetadataHandler(
            distribution.metadata, command_options, ignore_option_errors),
        ConfigOptionsHandler(
            distribution, command_options, ignore_option_errors),
    )
    for handler in handlers:
        handler.parse()
    return handlers
+
+
class ConfigHandler(object):
    """Handles metadata supplied in configuration files."""

    section_prefix = None
    """Prefix for config sections handled by this handler.
    Must be provided by class heirs.

    """

    aliases = {}
    """Options aliases.
    For compatibility with various packages. E.g.: d2to1 and pbr.
    Note: `-` in keys is replaced with `_` by config parser.

    """

    def __init__(self, target_obj, options, ignore_option_errors=False):
        # Keep only the sections belonging to this handler, keyed by the
        # section name with the prefix stripped ('' for the bare section).
        sections = {}

        section_prefix = self.section_prefix
        for section_name, section_options in options.items():
            if not section_name.startswith(section_prefix):
                continue

            section_name = section_name.replace(section_prefix, '').strip('.')
            sections[section_name] = section_options

        self.ignore_option_errors = ignore_option_errors
        self.target_obj = target_obj
        self.sections = sections
        # Names of options actually set; read by configuration_to_dict().
        self.set_options = []

    @property
    def parsers(self):
        """Metadata item name to parser function mapping."""
        raise NotImplementedError(
            '%s must provide .parsers property' % self.__class__.__name__)

    def __setitem__(self, option_name, value):
        """Set *option_name* on the target object, applying any parser.

        Silently skips options whose target attribute already has a truthy
        value (e.g. one supplied programmatically in setup.py).

        :raises KeyError: if the target object has no such attribute.
        """
        unknown = tuple()
        target_obj = self.target_obj

        # Translate alias into real name.
        option_name = self.aliases.get(option_name, option_name)

        current_value = getattr(target_obj, option_name, unknown)

        if current_value is unknown:
            raise KeyError(option_name)

        if current_value:
            # Already inhabited. Skipping.
            return

        skip_option = False
        parser = self.parsers.get(option_name)
        if parser:
            try:
                value = parser(value)

            except Exception:
                # Swallow parse errors only when explicitly requested.
                skip_option = True
                if not self.ignore_option_errors:
                    raise

        if skip_option:
            return

        # Prefer a dedicated set_<name>() hook on the target when present.
        setter = getattr(target_obj, 'set_%s' % option_name, None)
        if setter is None:
            setattr(target_obj, option_name, value)
        else:
            setter(value)

        self.set_options.append(option_name)

    @classmethod
    def _parse_list(cls, value, separator=','):
        """Represents value as a list.

        Value is split either by separator (defaults to comma) or by lines.

        :param value:
        :param separator: List items separator character.
        :rtype: list
        """
        if isinstance(value, list):  # _get_parser_compound case
            return value

        if '\n' in value:
            value = value.splitlines()
        else:
            value = value.split(separator)

        return [chunk.strip() for chunk in value if chunk.strip()]

    @classmethod
    def _parse_dict(cls, value):
        """Represents value as a dict.

        :param value:
        :rtype: dict
        :raises DistutilsOptionError: on an entry with no '=' separator.
        """
        separator = '='
        result = {}
        for line in cls._parse_list(value):
            key, sep, val = line.partition(separator)
            if sep != separator:
                raise DistutilsOptionError(
                    'Unable to parse option value to dict: %s' % value)
            result[key.strip()] = val.strip()

        return result

    @classmethod
    def _parse_bool(cls, value):
        """Represents value as boolean.

        :param value:
        :rtype: bool
        """
        value = value.lower()
        return value in ('1', 'true', 'yes')

    @classmethod
    def _parse_file(cls, value):
        """Represents value as a string, allowing including text
        from nearest files using `file:` directive.

        Directive is sandboxed and won't reach anything outside
        directory with setup.py.

        Examples:
            file: LICENSE
            file: README.rst, CHANGELOG.md, src/file.txt

        :param str value:
        :rtype: str
        """
        include_directive = 'file:'

        if not isinstance(value, string_types):
            return value

        if not value.startswith(include_directive):
            return value

        spec = value[len(include_directive):]
        filepaths = (os.path.abspath(path.strip()) for path in spec.split(','))
        # _assert_local raises on paths escaping the cwd; the `or True`
        # keeps it usable inside this filter expression.
        return '\n'.join(
            cls._read_file(path)
            for path in filepaths
            if (cls._assert_local(path) or True)
            and os.path.isfile(path)
        )

    @staticmethod
    def _assert_local(filepath):
        # Sandbox: only files under the current directory may be included.
        if not filepath.startswith(os.getcwd()):
            raise DistutilsOptionError(
                '`file:` directive can not access %s' % filepath)

    @staticmethod
    def _read_file(filepath):
        with io.open(filepath, encoding='utf-8') as f:
            return f.read()

    @classmethod
    def _parse_attr(cls, value):
        """Represents value as a module attribute.

        Examples:
            attr: package.attr
            attr: package.module.attr

        :param str value:
        :rtype: str
        """
        attr_directive = 'attr:'
        if not value.startswith(attr_directive):
            return value

        attrs_path = value.replace(attr_directive, '').strip().split('.')
        attr_name = attrs_path.pop()

        module_name = '.'.join(attrs_path)
        module_name = module_name or '__init__'

        # Import relative to the project directory (cwd at parse time).
        sys.path.insert(0, os.getcwd())
        try:
            module = import_module(module_name)
            value = getattr(module, attr_name)

        finally:
            sys.path = sys.path[1:]

        return value

    @classmethod
    def _get_parser_compound(cls, *parse_methods):
        """Returns parser function to represents value as a list.

        Parses a value applying given methods one after another.

        :param parse_methods:
        :rtype: callable
        """
        def parse(value):
            parsed = value

            for method in parse_methods:
                parsed = method(parsed)

            return parsed

        return parse

    @classmethod
    def _parse_section_to_dict(cls, section_options, values_parser=None):
        """Parses section options into a dictionary.

        Optionally applies a given parser to values.

        :param dict section_options:
        :param callable values_parser:
        :rtype: dict
        """
        value = {}
        values_parser = values_parser or (lambda val: val)
        for key, (_, val) in section_options.items():
            value[key] = values_parser(val)
        return value

    def parse_section(self, section_options):
        """Parses configuration file section.

        :param dict section_options:
        """
        for (name, (_, value)) in section_options.items():
            try:
                self[name] = value

            except KeyError:
                pass  # Keep silent for a new option may appear anytime.

    def parse(self):
        """Parses configuration file items from one
        or more related sections.

        Dispatches each section to ``parse_section`` (bare section) or a
        ``parse_section_<name>`` method on the handler.
        """
        for section_name, section_options in self.sections.items():

            method_postfix = ''
            if section_name:  # [section.option] variant
                method_postfix = '_%s' % section_name

            section_parser_method = getattr(
                self,
                # Dots in section names are translated into dunderscores.
                ('parse_section%s' % method_postfix).replace('.', '__'),
                None)

            if section_parser_method is None:
                raise DistutilsOptionError(
                    'Unsupported distribution option section: [%s.%s]' % (
                        self.section_prefix, section_name))

            section_parser_method(section_options)
+
+
class ConfigMetadataHandler(ConfigHandler):
    """Handles the ``[metadata]`` section of a configuration file."""

    section_prefix = 'metadata'

    aliases = {
        'home_page': 'url',
        'summary': 'description',
        'classifier': 'classifiers',
        'platform': 'platforms',
    }

    # Kept deliberately loose for partial compatibility with the ``pbr``
    # and ``d2to1`` packages, which also use a ``metadata`` section.
    strict_mode = False

    @property
    def parsers(self):
        """Metadata item name to parser function mapping."""
        list_parser = self._parse_list
        file_parser = self._parse_file
        dict_parser = self._parse_dict

        return {
            'platforms': list_parser,
            'keywords': list_parser,
            'provides': list_parser,
            'requires': list_parser,
            'obsoletes': list_parser,
            'classifiers': self._get_parser_compound(file_parser, list_parser),
            'license': file_parser,
            'description': file_parser,
            'long_description': file_parser,
            'version': self._parse_version,
            'project_urls': dict_parser,
        }

    def _parse_version(self, value):
        """Parse the ``version`` option value into a string.

        Supports the ``attr:`` directive; callables are invoked and
        iterable results (e.g. version tuples) are dot-joined.

        :param value:
        :rtype: str

        """
        version = self._parse_attr(value)

        if callable(version):
            version = version()

        if isinstance(version, string_types):
            return version

        if hasattr(version, '__iter__'):
            return '.'.join(map(str, version))

        return '%s' % version
+
+
class ConfigOptionsHandler(ConfigHandler):
    """Handles the ``[options]`` sections of a configuration file."""

    section_prefix = 'options'

    @property
    def parsers(self):
        """Metadata item name to parser function mapping."""
        parse_list = self._parse_list
        parse_bool = self._parse_bool
        parse_list_semicolon = partial(self._parse_list, separator=';')

        parsers = {
            'zip_safe': parse_bool,
            'use_2to3': parse_bool,
            'include_package_data': parse_bool,
            'package_dir': self._parse_dict,
            'packages': self._parse_packages,
            'entry_points': self._parse_file,
        }
        # Plain list-valued options.
        for name in ('use_2to3_fixers', 'use_2to3_exclude_fixers',
                     'convert_2to3_doctests', 'scripts', 'eager_resources',
                     'dependency_links', 'namespace_packages', 'py_modules'):
            parsers[name] = parse_list
        # Requirement lists use ';' so requirement specs may contain commas.
        for name in ('install_requires', 'setup_requires', 'tests_require'):
            parsers[name] = parse_list_semicolon
        return parsers

    def _parse_packages(self, value):
        """Parses `packages` option value.

        :param value:
        :rtype: list
        """
        find_directive = 'find:'
        if not value.startswith(find_directive):
            return self._parse_list(value)

        # `find:` delegates to setuptools.find_packages(); its keyword
        # arguments come from the dedicated [options.packages.find] section.
        find_kwargs = self.parse_section_packages__find(
            self.sections.get('packages.find', {}))

        from setuptools import find_packages

        return find_packages(**find_kwargs)

    def parse_section_packages__find(self, section_options):
        """Parses `packages.find` configuration file section.

        To be used in conjunction with _parse_packages().

        :param dict section_options:
        """
        section_data = self._parse_section_to_dict(
            section_options, self._parse_list)

        valid_keys = ('where', 'include', 'exclude')
        find_kwargs = {
            k: v for k, v in section_data.items() if k in valid_keys and v
        }

        where = find_kwargs.get('where')
        if where is not None:
            # find_packages() expects a single path, not a list.
            find_kwargs['where'] = where[0]

        return find_kwargs

    def parse_section_entry_points(self, section_options):
        """Parses `entry_points` configuration file section.

        :param dict section_options:
        """
        self['entry_points'] = self._parse_section_to_dict(
            section_options, self._parse_list)

    def _parse_package_data(self, section_options):
        # Shared by package_data and exclude_package_data: normalize the
        # '*' wildcard key to '' ("all packages").
        parsed = self._parse_section_to_dict(section_options, self._parse_list)

        wildcard = parsed.get('*')
        if wildcard:
            parsed[''] = parsed.pop('*')

        return parsed

    def parse_section_package_data(self, section_options):
        """Parses `package_data` configuration file section.

        :param dict section_options:
        """
        self['package_data'] = self._parse_package_data(section_options)

    def parse_section_exclude_package_data(self, section_options):
        """Parses `exclude_package_data` configuration file section.

        :param dict section_options:
        """
        self['exclude_package_data'] = self._parse_package_data(
            section_options)

    def parse_section_extras_require(self, section_options):
        """Parses `extras_require` configuration file section.

        :param dict section_options:
        """
        self['extras_require'] = self._parse_section_to_dict(
            section_options, partial(self._parse_list, separator=';'))
diff --git a/setuptools/dep_util.py b/setuptools/dep_util.py
new file mode 100644
index 0000000..2931c13
--- /dev/null
+++ b/setuptools/dep_util.py
@@ -0,0 +1,23 @@
+from distutils.dep_util import newer_group
+
# This was almost entirely copy-pasted from 'newer_pairwise()'; it is
# just another convenience function, accepting a *group* of sources
# per target.
def newer_pairwise_group(sources_groups, targets):
    """Walk both arguments in parallel, testing if each source group is newer
    than its corresponding target. Returns a pair of lists (sources_groups,
    targets) where sources is newer than target, according to the semantics
    of 'newer_group()'.
    """
    if len(sources_groups) != len(targets):
        raise ValueError("'sources_group' and 'targets' must be the same length")

    # keep only the (group, target) pairs whose group is newer
    stale_pairs = [
        (group, target)
        for group, target in zip(sources_groups, targets)
        if newer_group(group, target)
    ]
    n_sources = [group for group, _ in stale_pairs]
    n_targets = [target for _, target in stale_pairs]
    return n_sources, n_targets
diff --git a/setuptools/depends.py b/setuptools/depends.py
new file mode 100644
index 0000000..45e7052
--- /dev/null
+++ b/setuptools/depends.py
@@ -0,0 +1,186 @@
+import sys
+import imp
+import marshal
+from distutils.version import StrictVersion
+from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
+
+from .py33compat import Bytecode
+
+
+__all__ = [
+    'Require', 'find_module', 'get_module_constant', 'extract_constant'
+]
+
+
class Require:
    """A prerequisite to building or installing a distribution"""

    def __init__(self, name, requested_version, module, homepage='',
            attribute=None, format=None):
        # name: distribution name; module: importable module used to probe
        # for the dependency; attribute: name of the module attribute
        # holding the version; format: callable used to parse/normalize
        # version strings.

        # When a specific version is requested, default to comparing
        # StrictVersion-parsed values rather than raw strings.
        if format is None and requested_version is not None:
            format = StrictVersion

        if format is not None:
            requested_version = format(requested_version)
            if attribute is None:
                # conventional location of a module's version
                attribute = '__version__'

        # Bind every argument (and the derived values above) as instance
        # attributes in one shot, then drop the circular 'self' entry
        # that locals() necessarily includes.
        self.__dict__.update(locals())
        del self.self

    def full_name(self):
        """Return full package/distribution name, w/version"""
        if self.requested_version is not None:
            return '%s-%s' % (self.name, self.requested_version)
        return self.name

    def version_ok(self, version):
        """Is 'version' sufficiently up-to-date?"""
        # Trivially OK when no attribute/format was configured; otherwise
        # the version must be known and >= the requested one.  (Note 'and'
        # binds tighter than 'or' here.)
        return self.attribute is None or self.format is None or \
            str(version) != "unknown" and version >= self.requested_version

    def get_version(self, paths=None, default="unknown"):
        """Get version number of installed module, 'None', or 'default'

        Search 'paths' for module.  If not found, return 'None'.  If found,
        return the extracted version attribute, or 'default' if no version
        attribute was specified, or the value cannot be determined without
        importing the module.  The version is formatted according to the
        requirement's version format (if any), unless it is 'None' or the
        supplied 'default'.
        """

        if self.attribute is None:
            # No version attribute configured: mere findability of the
            # module (without importing it) yields 'default'.
            try:
                f, p, i = find_module(self.module, paths)
                if f:
                    f.close()
                return default
            except ImportError:
                return None

        v = get_module_constant(self.module, self.attribute, default, paths)

        # Only format real, extracted values -- never None or the
        # caller-supplied default sentinel.
        if v is not None and v is not default and self.format is not None:
            return self.format(v)

        return v

    def is_present(self, paths=None):
        """Return true if dependency is present on 'paths'"""
        return self.get_version(paths) is not None

    def is_current(self, paths=None):
        """Return true if dependency is present and up-to-date on 'paths'"""
        version = self.get_version(paths)
        if version is None:
            return False
        return self.version_ok(version)
+
+
def find_module(module, paths=None):
    """Just like 'imp.find_module()', but with package support

    Walks a dotted module name one segment at a time, descending into
    package directories, and returns the (file, path, description) tuple
    for the final segment.
    """

    parts = module.split('.')

    while parts:
        part = parts.pop(0)
        f, path, (suffix, mode, kind) = info = imp.find_module(part, paths)

        if kind == PKG_DIRECTORY:
            # descend into the package; a bare package name resolves
            # to its __init__ module
            parts = parts or ['__init__']
            paths = [path]

        elif parts:
            # dotted parts remain but the current segment isn't a package
            raise ImportError("Can't find %r in %s" % (parts, module))

    return info
+
+
def get_module_constant(module, symbol, default=-1, paths=None):
    """Find 'module' by searching 'paths', and extract 'symbol'

    Return 'None' if 'module' does not exist on 'paths', or it does not define
    'symbol'.  If the module defines 'symbol' as a constant, return the
    constant.  Otherwise, return 'default'."""

    try:
        f, path, (suffix, mode, kind) = find_module(module, paths)
    except ImportError:
        # Module doesn't exist
        return None

    try:
        if kind == PY_COMPILED:
            # skip magic & date
            # NOTE(review): the pyc header size varies across Python
            # versions; 8 bytes matches the versions this vendored
            # copy targets -- confirm before reuse elsewhere.
            f.read(8)
            code = marshal.load(f)
        elif kind == PY_FROZEN:
            code = imp.get_frozen_object(module)
        elif kind == PY_SOURCE:
            code = compile(f.read(), path, 'exec')
        else:
            # Not something we can parse; we'll have to import it.  :(
            if module not in sys.modules:
                imp.load_module(module, f, path, (suffix, mode, kind))
            return getattr(sys.modules[module], symbol, None)

    finally:
        # find_module may return f=None (e.g. for packages/frozen modules)
        if f:
            f.close()

    return extract_constant(code, symbol, default)
+
+
def extract_constant(code, symbol, default=-1):
    """Extract the constant value of 'symbol' from 'code'

    If the name 'symbol' is bound to a constant value by the Python code
    object 'code', return that value.  If 'symbol' is bound to an expression,
    return 'default'.  Otherwise, return 'None'.

    Return value is based on the first assignment to 'symbol'.  'symbol' must
    be a global, or at least a non-"fast" local in the code block.  That is,
    only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
    must be present in 'code.co_names'.
    """
    if symbol not in code.co_names:
        # name's not there, can't possibly be an assignment
        return None

    name_idx = list(code.co_names).index(symbol)

    # hard-coded CPython opcode numbers (see the `dis` module)
    STORE_NAME = 90
    STORE_GLOBAL = 97
    LOAD_CONST = 100

    const = default

    for byte_code in Bytecode(code):
        op = byte_code.opcode
        arg = byte_code.arg

        if op == LOAD_CONST:
            # remember the constant most recently pushed on the stack
            const = code.co_consts[arg]
        elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
            # first store to 'symbol': return the remembered constant,
            # or 'default' if the preceding op wasn't a LOAD_CONST
            return const
        else:
            # any other op invalidates the remembered constant
            const = default
+
+
+def _update_globals():
+    """
+    Patch the globals to remove the objects not available on some platforms.
+
+    XXX it'd be better to test assertions about bytecode instead.
+    """
+
+    if not sys.platform.startswith('java') and sys.platform != 'cli':
+        return
+    incompatible = 'extract_constant', 'get_module_constant'
+    for name in incompatible:
+        del globals()[name]
+        __all__.remove(name)
+
+
+_update_globals()
diff --git a/setuptools/dist.py b/setuptools/dist.py
new file mode 100644
index 0000000..321ab6b
--- /dev/null
+++ b/setuptools/dist.py
@@ -0,0 +1,1061 @@
+# -*- coding: utf-8 -*-
+__all__ = ['Distribution']
+
+import re
+import os
+import warnings
+import numbers
+import distutils.log
+import distutils.core
+import distutils.cmd
+import distutils.dist
+import itertools
+from collections import defaultdict
+from distutils.errors import (
+    DistutilsOptionError, DistutilsPlatformError, DistutilsSetupError,
+)
+from distutils.util import rfc822_escape
+from distutils.version import StrictVersion
+
+from setuptools.extern import six
+from setuptools.extern import packaging
+from setuptools.extern.six.moves import map, filter, filterfalse
+
+from setuptools.depends import Require
+from setuptools import windows_support
+from setuptools.monkey import get_unpatched
+from setuptools.config import parse_configuration
+import pkg_resources
+from .py36compat import Distribution_parse_config_files
+
+__import__('setuptools.extern.packaging.specifiers')
+__import__('setuptools.extern.packaging.version')
+
+
def _get_unpatched(cls):
    """Deprecated alias for ``monkey.get_unpatched``; emits a warning."""
    warnings.warn("Do not call this function", DeprecationWarning)
    return get_unpatched(cls)
+
+
def get_metadata_version(dist_md):
    """Return the lowest Metadata-Version required by the fields set
    on ``dist_md`` (a DistributionMetadata-like object)."""
    uses_21_fields = bool(
        dist_md.long_description_content_type or dist_md.provides_extras)
    if uses_21_fields:
        return StrictVersion('2.1')

    uses_12_fields = (
        dist_md.maintainer is not None
        or dist_md.maintainer_email is not None
        or getattr(dist_md, 'python_requires', None) is not None
    )
    if uses_12_fields:
        return StrictVersion('1.2')

    uses_11_fields = bool(
        dist_md.provides or dist_md.requires or dist_md.obsoletes
        or dist_md.classifiers or dist_md.download_url
    )
    return StrictVersion('1.1' if uses_11_fields else '1.0')
+
+
+# Based on Python 3.5 version
+def write_pkg_file(self, file):
+    """Write the PKG-INFO format data to a file object.
+    """
+    version = get_metadata_version(self)
+
+    file.write('Metadata-Version: %s\n' % version)
+    file.write('Name: %s\n' % self.get_name())
+    file.write('Version: %s\n' % self.get_version())
+    file.write('Summary: %s\n' % self.get_description())
+    file.write('Home-page: %s\n' % self.get_url())
+
+    if version < StrictVersion('1.2'):
+        file.write('Author: %s\n' % self.get_contact())
+        file.write('Author-email: %s\n' % self.get_contact_email())
+    else:
+        optional_fields = (
+            ('Author', 'author'),
+            ('Author-email', 'author_email'),
+            ('Maintainer', 'maintainer'),
+            ('Maintainer-email', 'maintainer_email'),
+        )
+
+        for field, attr in optional_fields:
+            attr_val = getattr(self, attr)
+            if six.PY2:
+                attr_val = self._encode_field(attr_val)
+
+            if attr_val is not None:
+                file.write('%s: %s\n' % (field, attr_val))
+
+    file.write('License: %s\n' % self.get_license())
+    if self.download_url:
+        file.write('Download-URL: %s\n' % self.download_url)
+    for project_url in self.project_urls.items():
+        file.write('Project-URL: %s, %s\n' % project_url)
+
+    long_desc = rfc822_escape(self.get_long_description())
+    file.write('Description: %s\n' % long_desc)
+
+    keywords = ','.join(self.get_keywords())
+    if keywords:
+        file.write('Keywords: %s\n' % keywords)
+
+    if version >= StrictVersion('1.2'):
+        for platform in self.get_platforms():
+            file.write('Platform: %s\n' % platform)
+    else:
+        self._write_list(file, 'Platform', self.get_platforms())
+
+    self._write_list(file, 'Classifier', self.get_classifiers())
+
+    # PEP 314
+    self._write_list(file, 'Requires', self.get_requires())
+    self._write_list(file, 'Provides', self.get_provides())
+    self._write_list(file, 'Obsoletes', self.get_obsoletes())
+
+    # Setuptools specific for PEP 345
+    if hasattr(self, 'python_requires'):
+        file.write('Requires-Python: %s\n' % self.python_requires)
+
+    # PEP 566
+    if self.long_description_content_type:
+        file.write(
+            'Description-Content-Type: %s\n' %
+            self.long_description_content_type
+        )
+    if self.provides_extras:
+        for extra in self.provides_extras:
+            file.write('Provides-Extra: %s\n' % extra)
+
+
sequence = tuple, list


def check_importable(dist, attr, value):
    """Verify that 'value' is an importable 'module:attrs' style reference.

    Parsed with entry-point syntax; extras are not permitted.
    Raises DistutilsSetupError for malformed values.
    """
    try:
        ep = pkg_resources.EntryPoint.parse('x=' + value)
        # Explicit check instead of `assert`, which is stripped (silently
        # disabling this validation) under `python -O`.
        if ep.extras:
            raise ValueError("importable reference must not declare extras")
    except (TypeError, ValueError, AttributeError):
        raise DistutilsSetupError(
            "%r must be importable 'module:attrs' string (got %r)"
            % (attr, value)
        )
+
+
def assert_string_list(dist, attr, value):
    """Verify that 'value' is a list (or tuple) of strings.

    Raises DistutilsSetupError otherwise.  (The previous docstring said
    "or None", but a None value has never been accepted: joining None
    raises and is reported as an error.)
    """
    try:
        # ''.join() raises for non-iterables and non-string items; a bare
        # string survives the join but equals the result, which we reject
        # below.  An explicit comparison is used rather than `assert`,
        # which would be stripped (silently skipping the bare-string
        # check) under `python -O`.
        joined = ''.join(value)
    except (TypeError, ValueError, AttributeError):
        raise DistutilsSetupError(
            "%r must be a list of strings (got %r)" % (attr, value)
        )
    if joined == value:
        raise DistutilsSetupError(
            "%r must be a list of strings (got %r)" % (attr, value)
        )
+
+
def check_nsp(dist, attr, value):
    """Verify that namespace packages are valid"""
    ns_packages = value
    assert_string_list(dist, attr, ns_packages)
    for nsp in ns_packages:
        if not dist.has_contents_for(nsp):
            raise DistutilsSetupError(
                "Distribution contains no modules or packages for " +
                "namespace package %r" % nsp
            )
        # every parent namespace ('a' for 'a.b') should itself be declared
        parent = nsp.rpartition('.')[0]
        if parent and parent not in ns_packages:
            distutils.log.warn(
                "WARNING: %r is declared as a package namespace, but %r"
                " is not: please correct this in setup.py", nsp, parent
            )
+
+
def check_extras(dist, attr, value):
    """Verify that extras_require mapping is valid"""
    try:
        for extra, reqs in value.items():
            _check_extra(extra, reqs)
    except (TypeError, ValueError, AttributeError):
        raise DistutilsSetupError(
            "'extras_require' must be a dictionary whose values are "
            "strings or lists of strings containing valid project/version "
            "requirement specifiers."
        )
+
+
def _check_extra(extra, reqs):
    """Validate one extras_require entry: 'name[:marker]' plus requirements."""
    name, _, marker = extra.partition(':')
    if marker and pkg_resources.invalid_marker(marker):
        raise DistutilsSetupError("Invalid environment marker: " + marker)
    # fully materialize the parse so malformed requirements raise here
    list(pkg_resources.parse_requirements(reqs))
+
+
def assert_bool(dist, attr, value):
    """Verify that value is True, False, 0, or 1"""
    if bool(value) == value:
        return
    tmpl = "{attr!r} must be a boolean value (got {value!r})"
    raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
+
+
def check_requirements(dist, attr, value):
    """Verify that install_requires is a valid requirements list"""
    try:
        # parse first (its error message is the more informative one),
        # then reject unordered containers outright
        list(pkg_resources.parse_requirements(value))
        if isinstance(value, (dict, set)):
            raise TypeError("Unordered types are not allowed")
    except (TypeError, ValueError) as exc:
        msg = (
            "{attr!r} must be a string or list of strings "
            "containing valid project/version requirement specifiers; {error}"
        ).format(attr=attr, error=exc)
        raise DistutilsSetupError(msg)
+
+
def check_specifier(dist, attr, value):
    """Verify that value is a valid version specifier"""
    try:
        packaging.specifiers.SpecifierSet(value)
    except packaging.specifiers.InvalidSpecifier as exc:
        msg = (
            "{attr!r} must be a string "
            "containing valid version specifiers; {error}"
        ).format(attr=attr, error=exc)
        raise DistutilsSetupError(msg)
+
+
def check_entry_points(dist, attr, value):
    """Verify that entry_points map is parseable"""
    try:
        pkg_resources.EntryPoint.parse_map(value)
    except ValueError as exc:
        raise DistutilsSetupError(exc)
+
+
def check_test_suite(dist, attr, value):
    """Verify that the test_suite option is a string (a dotted name)."""
    if isinstance(value, six.string_types):
        return
    raise DistutilsSetupError("test_suite must be a string")
+
+
def check_package_data(dist, attr, value):
    """Verify that value is a dictionary of package names to glob lists"""
    def _is_pattern_list(patterns):
        # any iterable of patterns is acceptable
        try:
            iter(patterns)
        except TypeError:
            return False
        return True

    ok = isinstance(value, dict) and all(
        isinstance(pkg, str) and _is_pattern_list(patterns)
        for pkg, patterns in value.items()
    )
    if not ok:
        raise DistutilsSetupError(
            attr + " must be a dictionary mapping package names to lists of "
            "wildcard patterns"
        )
+
+
def check_packages(dist, attr, value):
    """Warn (without failing) about package names that look invalid."""
    valid_name = re.compile(r'\w+(\.\w+)*')
    for pkgname in value:
        if valid_name.match(pkgname):
            continue
        distutils.log.warn(
            "WARNING: %r not a valid package name; please use only "
            ".-separated package names in setup.py", pkgname
        )
+
+
+_Distribution = get_unpatched(distutils.core.Distribution)
+
+
+class Distribution(Distribution_parse_config_files, _Distribution):
+    """Distribution with support for features, tests, and package data
+
+    This is an enhanced version of 'distutils.dist.Distribution' that
+    effectively adds the following new optional keyword arguments to 'setup()':
+
+     'install_requires' -- a string or sequence of strings specifying project
+        versions that the distribution requires when installed, in the format
+        used by 'pkg_resources.require()'.  They will be installed
+        automatically when the package is installed.  If you wish to use
+        packages that are not available in PyPI, or want to give your users an
+        alternate download location, you can add a 'find_links' option to the
+        '[easy_install]' section of your project's 'setup.cfg' file, and then
+        setuptools will scan the listed web pages for links that satisfy the
+        requirements.
+
+     'extras_require' -- a dictionary mapping names of optional "extras" to the
+        additional requirement(s) that using those extras incurs. For example,
+        this::
+
+            extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
+
+        indicates that the distribution can optionally provide an extra
+        capability called "reST", but it can only be used if docutils and
+        reSTedit are installed.  If the user installs your package using
+        EasyInstall and requests one of your extras, the corresponding
+        additional requirements will be installed if needed.
+
+     'features' **deprecated** -- a dictionary mapping option names to
+        'setuptools.Feature'
+        objects.  Features are a portion of the distribution that can be
+        included or excluded based on user options, inter-feature dependencies,
+        and availability on the current system.  Excluded features are omitted
+        from all setup commands, including source and binary distributions, so
+        you can create multiple distributions from the same source tree.
+        Feature names should be valid Python identifiers, except that they may
+        contain the '-' (minus) sign.  Features can be included or excluded
+        via the command line options '--with-X' and '--without-X', where 'X' is
+        the name of the feature.  Whether a feature is included by default, and
+        whether you are allowed to control this from the command line, is
+        determined by the Feature object.  See the 'Feature' class for more
+        information.
+
+     'test_suite' -- the name of a test suite to run for the 'test' command.
+        If the user runs 'python setup.py test', the package will be installed,
+        and the named test suite will be run.  The format is the same as
+        would be used on a 'unittest.py' command line.  That is, it is the
+        dotted name of an object to import and call to generate a test suite.
+
+     'package_data' -- a dictionary mapping package names to lists of filenames
+        or globs to use to find data files contained in the named packages.
+        If the dictionary has filenames or globs listed under '""' (the empty
+        string), those names will be searched for in every package, in addition
+        to any names for the specific package.  Data files found using these
+        names/globs will be installed along with the package, in the same
+        location as the package.  Note that globs are allowed to reference
+        the contents of non-package subdirectories, as long as you use '/' as
+        a path separator.  (Globs are automatically converted to
+        platform-specific paths at runtime.)
+
+    In addition to these new keywords, this class also has several new methods
+    for manipulating the distribution's contents.  For example, the 'include()'
+    and 'exclude()' methods can be thought of as in-place add and subtract
+    commands that add or remove packages, modules, extensions, and so on from
+    the distribution.  They are used by the feature subsystem to configure the
+    distribution for the included and excluded features.
+    """
+
+    _patched_dist = None
+
+    def patch_missing_pkg_info(self, attrs):
+        # Fake up a replacement for the data that would normally come from
+        # PKG-INFO, but which might not yet be built if this is a fresh
+        # checkout.
+        #
+        if not attrs or 'name' not in attrs or 'version' not in attrs:
+            return
+        key = pkg_resources.safe_name(str(attrs['name'])).lower()
+        dist = pkg_resources.working_set.by_key.get(key)
+        if dist is not None and not dist.has_metadata('PKG-INFO'):
+            dist._version = pkg_resources.safe_version(str(attrs['version']))
+            self._patched_dist = dist
+
    def __init__(self, attrs=None):
        # package_data may already have been set (e.g. by a subclass)
        have_package_data = hasattr(self, "package_data")
        if not have_package_data:
            self.package_data = {}
        attrs = attrs or {}
        if 'features' in attrs or 'require_features' in attrs:
            Feature.warn_deprecated()
        self.require_features = []
        self.features = {}
        self.dist_files = []
        # popped entries are consumed here so they are not passed through
        # to distutils' Distribution.__init__ below
        self.src_root = attrs.pop("src_root", None)
        self.patch_missing_pkg_info(attrs)
        self.project_urls = attrs.get('project_urls', {})
        self.dependency_links = attrs.pop('dependency_links', [])
        self.setup_requires = attrs.pop('setup_requires', [])
        # pre-seed an attribute for every registered setup() keyword so
        # the remaining attrs are recognized by distutils
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            vars(self).setdefault(ep.name, None)
        _Distribution.__init__(self, attrs)

        # The project_urls attribute may not be supported in distutils, so
        # prime it here from our value if not automatically set
        self.metadata.project_urls = getattr(
            self.metadata, 'project_urls', self.project_urls)
        self.metadata.long_description_content_type = attrs.get(
            'long_description_content_type'
        )
        self.metadata.provides_extras = getattr(
            self.metadata, 'provides_extras', set()
        )

        if isinstance(self.metadata.version, numbers.Number):
            # Some people apparently take "version number" too literally :)
            self.metadata.version = str(self.metadata.version)

        if self.metadata.version is not None:
            try:
                # normalize the version per PEP 440, warning when the
                # normalized form differs from what was specified
                ver = packaging.version.Version(self.metadata.version)
                normalized_version = str(ver)
                if self.metadata.version != normalized_version:
                    warnings.warn(
                        "Normalizing '%s' to '%s'" % (
                            self.metadata.version,
                            normalized_version,
                        )
                    )
                    self.metadata.version = normalized_version
            except (packaging.version.InvalidVersion, TypeError):
                warnings.warn(
                    "The version specified (%r) is an invalid version, this "
                    "may not work as expected with newer versions of "
                    "setuptools, pip, and PyPI. Please see PEP 440 for more "
                    "details." % self.metadata.version
                )
        self._finalize_requires()
+
+    def _finalize_requires(self):
+        """
+        Set `metadata.python_requires` and fix environment markers
+        in `install_requires` and `extras_require`.
+        """
+        if getattr(self, 'python_requires', None):
+            self.metadata.python_requires = self.python_requires
+
+        if getattr(self, 'extras_require', None):
+            for extra in self.extras_require.keys():
+                # Since this gets called multiple times at points where the
+                # keys have become 'converted' extras, ensure that we are only
+                # truly adding extras we haven't seen before here.
+                extra = extra.split(':')[0]
+                if extra:
+                    self.metadata.provides_extras.add(extra)
+
+        self._convert_extras_requirements()
+        self._move_install_requirements_markers()
+
    def _convert_extras_requirements(self):
        """
        Convert requirements in `extras_require` of the form
        `"extra": ["barbazquux; {marker}"]` to
        `"extra:{marker}": ["barbazquux"]`.
        """
        spec_ext_reqs = getattr(self, 'extras_require', None) or {}
        # accumulated here so _move_install_requirements_markers can
        # append marker-only sections afterwards
        self._tmp_extras_require = defaultdict(list)
        for section, v in spec_ext_reqs.items():
            # Do not strip empty sections: this bare defaultdict access
            # deliberately materializes the key even when the section
            # holds no requirements.
            self._tmp_extras_require[section]
            for r in pkg_resources.parse_requirements(v):
                # a requirement's environment marker (if any) is moved
                # into the section key as a ':marker' suffix
                suffix = self._suffix_for(r)
                self._tmp_extras_require[section + suffix].append(r)
+
+    @staticmethod
+    def _suffix_for(req):
+        """
+        For a requirement, return the 'extras_require' suffix for
+        that requirement.
+        """
+        return ':' + str(req.marker) if req.marker else ''
+
    def _move_install_requirements_markers(self):
        """
        Move requirements in `install_requires` that are using environment
        markers to `extras_require`.
        """

        # divide the install_requires into two sets, simple ones still
        # handled by install_requires and more complex ones handled
        # by extras_require.

        def is_simple_req(req):
            # "simple" == no environment marker
            return not req.marker

        spec_inst_reqs = getattr(self, 'install_requires', None) or ()
        inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs))
        simple_reqs = filter(is_simple_req, inst_reqs)
        complex_reqs = filterfalse(is_simple_req, inst_reqs)
        self.install_requires = list(map(str, simple_reqs))

        # marker-bearing requirements become ":{marker}" extras sections
        for r in complex_reqs:
            self._tmp_extras_require[':' + str(r.marker)].append(r)
        # render requirements back to strings, stripping markers (now
        # encoded in the section keys) via _clean_req
        self.extras_require = dict(
            (k, [str(r) for r in map(self._clean_req, v)])
            for k, v in self._tmp_extras_require.items()
        )
+
    def _clean_req(self, req):
        """
        Given a Requirement, remove environment markers and return it.
        """
        # mutates the requirement in place and returns the same object,
        # for convenient use with map()
        req.marker = None
        return req
+
    def parse_config_files(self, filenames=None, ignore_option_errors=False):
        """Parses configuration files from various levels
        and loads configuration.

        Extends the distutils behavior by additionally applying
        declarative config (via parse_configuration) and then
        re-finalizing requirement-related metadata.
        """
        _Distribution.parse_config_files(self, filenames=filenames)

        parse_configuration(self, self.command_options,
                            ignore_option_errors=ignore_option_errors)
        # config may have introduced python_requires / extras_require
        self._finalize_requires()
+
+    def parse_command_line(self):
+        """Process features after parsing command line options"""
+        result = _Distribution.parse_command_line(self)
+        if self.features:
+            self._finalize_features()
+        return result
+
+    def _feature_attrname(self, name):
+        """Convert feature name to corresponding option attribute name"""
+        return 'with_' + name.replace('-', '_')
+
    def fetch_build_eggs(self, requires):
        """Resolve pre-setup requirements"""
        # resolve against the active working set, downloading missing
        # eggs via fetch_build_egg; replace_conflicting permits swapping
        # out an already-activated conflicting distribution
        resolved_dists = pkg_resources.working_set.resolve(
            pkg_resources.parse_requirements(requires),
            installer=self.fetch_build_egg,
            replace_conflicting=True,
        )
        # activate everything that was resolved
        for dist in resolved_dists:
            pkg_resources.working_set.add(dist, replace=True)
        return resolved_dists
+
    def finalize_options(self):
        """Finalize distutils options, then run per-keyword validation
        hooks registered via the 'distutils.setup_keywords' entry points,
        and normalize 2to3 doctest paths."""
        _Distribution.finalize_options(self)
        if self.features:
            self._set_global_opts_from_features()

        # each registered setup() keyword gets a chance to validate/act
        # on its value; the hook's own requirements are installed first
        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
            value = getattr(self, ep.name, None)
            if value is not None:
                ep.require(installer=self.fetch_build_egg)
                ep.load()(self, ep.name, value)
        if getattr(self, 'convert_2to3_doctests', None):
            # XXX may convert to set here when we can rely on set being builtin
            self.convert_2to3_doctests = [
                os.path.abspath(p)
                for p in self.convert_2to3_doctests
            ]
        else:
            self.convert_2to3_doctests = []
+
+    def get_egg_cache_dir(self):
+        egg_cache_dir = os.path.join(os.curdir, '.eggs')
+        if not os.path.exists(egg_cache_dir):
+            os.mkdir(egg_cache_dir)
+            windows_support.hide_file(egg_cache_dir)
+            readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
+            with open(readme_txt_filename, 'w') as f:
+                f.write('This directory contains eggs that were downloaded '
+                        'by setuptools to build, test, and run plug-ins.\n\n')
+                f.write('This directory caches those eggs to prevent '
+                        'repeated downloads.\n\n')
+                f.write('However, it is safe to delete this directory.\n\n')
+
+        return egg_cache_dir
+
    def fetch_build_egg(self, req):
        """Fetch an egg needed for building"""
        from setuptools.command.easy_install import easy_install
        # run easy_install through a throwaway Distribution so this
        # one's option state is left untouched
        dist = self.__class__({'script_args': ['easy_install']})
        opts = dist.get_option_dict('easy_install')
        opts.clear()
        opts.update(
            (k, v)
            for k, v in self.get_option_dict('easy_install').items()
            if k in (
                # don't use any other settings
                # (NOTE(review): 'site_dirs' is listed twice; harmless,
                # since this tuple is only used for membership tests)
                'find_links', 'site_dirs', 'index_url',
                'optimize', 'site_dirs', 'allow_hosts',
            ))
        if self.dependency_links:
            links = self.dependency_links[:]
            if 'find_links' in opts:
                # option-dict values are (source, value) pairs; merge the
                # existing value with our dependency links
                links = opts['find_links'][1] + links
            opts['find_links'] = ('setup', links)
        install_dir = self.get_egg_cache_dir()
        cmd = easy_install(
            dist, args=["x"], install_dir=install_dir,
            exclude_scripts=True,
            always_copy=False, build_directory=None, editable=False,
            upgrade=False, multi_version=True, no_report=True, user=False
        )
        cmd.ensure_finalized()
        return cmd.easy_install(req)
+
    def _set_global_opts_from_features(self):
        """Add --with-X/--without-X options based on optional features"""

        go = []  # new global options to prepend
        no = self.negative_opt.copy()  # negative-option aliases

        for name, feature in self.features.items():
            # start each feature as "unknown" until the command line decides
            self._set_feature(name, None)
            feature.validate(self)

            if feature.optional:
                descr = feature.description
                incdef = ' (default)'
                excdef = ''
                # move the "(default)" tag to whichever side is the default
                if not feature.include_by_default():
                    excdef, incdef = incdef, excdef

                new = (
                    ('with-' + name, None, 'include ' + descr + incdef),
                    ('without-' + name, None, 'exclude ' + descr + excdef),
                )
                go.extend(new)
                # --without-X is recorded as the negation of --with-X
                no['without-' + name] = 'with-' + name

        self.global_options = self.feature_options = go + self.global_options
        self.negative_opt = self.feature_negopt = no
+
    def _finalize_features(self):
        """Add/remove features and resolve dependencies between them"""

        # First, flag all the enabled items (and thus their dependencies)
        for name, feature in self.features.items():
            enabled = self.feature_is_included(name)
            # None means "not decided yet"; fall back to the feature default.
            if enabled or (enabled is None and feature.include_by_default()):
                feature.include_in(self)
                self._set_feature(name, 1)

        # Then disable the rest, so that off-by-default features don't
        # get flagged as errors when they're required by an enabled feature
        for name, feature in self.features.items():
            if not self.feature_is_included(name):
                feature.exclude_from(self)
                self._set_feature(name, 0)
+
    def get_command_class(self, command):
        """Pluggable version of get_command_class()"""
        if command in self.cmdclass:
            return self.cmdclass[command]

        # Look the command up as a 'distutils.commands' entry point; the
        # first match wins and is cached in self.cmdclass.
        eps = pkg_resources.iter_entry_points('distutils.commands', command)
        for ep in eps:
            # Resolving the entry point's extras may fetch build eggs.
            ep.require(installer=self.fetch_build_egg)
            self.cmdclass[command] = cmdclass = ep.load()
            return cmdclass
        else:
            # for/else: no entry point matched -- fall back to distutils.
            return _Distribution.get_command_class(self, command)
+
+    def print_commands(self):
+        for ep in pkg_resources.iter_entry_points('distutils.commands'):
+            if ep.name not in self.cmdclass:
+                # don't require extras as the commands won't be invoked
+                cmdclass = ep.resolve()
+                self.cmdclass[ep.name] = cmdclass
+        return _Distribution.print_commands(self)
+
+    def get_command_list(self):
+        for ep in pkg_resources.iter_entry_points('distutils.commands'):
+            if ep.name not in self.cmdclass:
+                # don't require extras as the commands won't be invoked
+                cmdclass = ep.resolve()
+                self.cmdclass[ep.name] = cmdclass
+        return _Distribution.get_command_list(self)
+
    def _set_feature(self, name, status):
        """Set feature's inclusion status

        'status' is 1 (included), 0 (excluded) or None (undecided); it is
        stored as an attribute named by self._feature_attrname(name).
        """
        setattr(self, self._feature_attrname(name), status)
+
    def feature_is_included(self, name):
        """Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
        # Reads the status previously stored by _set_feature().
        return getattr(self, self._feature_attrname(name))
+
    def include_feature(self, name):
        """Request inclusion of feature named 'name'"""

        if self.feature_is_included(name) == 0:
            # 0 means the feature was already explicitly excluded; requesting
            # it now is an unresolvable conflict.
            descr = self.features[name].description
            raise DistutilsOptionError(
                descr + " is required, but was excluded or is not available"
            )
        self.features[name].include_in(self)
        self._set_feature(name, 1)
+
    def include(self, **attrs):
        """Add items to distribution that are named in keyword arguments

        For example, 'dist.include(py_modules=["x"])' would add 'x' to
        the distribution's 'py_modules' attribute, if it was not already
        there.

        Currently, this method only supports inclusion for attributes that are
        lists or tuples.  If you need to add support for adding to other
        attributes in this or a subclass, you can add an '_include_X' method,
        where 'X' is the name of the attribute.  The method will be called with
        the value passed to 'include()'.  So, 'dist.include(foo={"bar":"baz"})'
        will try to call 'dist._include_foo({"bar":"baz"})', which can then
        handle whatever special inclusion logic is needed.
        """
        for k, v in attrs.items():
            # Dispatch to a per-attribute handler when one exists.
            include = getattr(self, '_include_' + k, None)
            if include:
                include(v)
            else:
                self._include_misc(k, v)
+
+    def exclude_package(self, package):
+        """Remove packages, modules, and extensions in named package"""
+
+        pfx = package + '.'
+        if self.packages:
+            self.packages = [
+                p for p in self.packages
+                if p != package and not p.startswith(pfx)
+            ]
+
+        if self.py_modules:
+            self.py_modules = [
+                p for p in self.py_modules
+                if p != package and not p.startswith(pfx)
+            ]
+
+        if self.ext_modules:
+            self.ext_modules = [
+                p for p in self.ext_modules
+                if p.name != package and not p.name.startswith(pfx)
+            ]
+
+    def has_contents_for(self, package):
+        """Return true if 'exclude_package(package)' would do something"""
+
+        pfx = package + '.'
+
+        for p in self.iter_distribution_names():
+            if p == package or p.startswith(pfx):
+                return True
+
    def _exclude_misc(self, name, value):
        """Handle 'exclude()' for list/tuple attrs without a special handler"""
        if not isinstance(value, sequence):
            raise DistutilsSetupError(
                "%s: setting must be a list or tuple (%r)" % (name, value)
            )
        try:
            old = getattr(self, name)
        except AttributeError:
            raise DistutilsSetupError(
                "%s: No such distribution setting" % name
            )
        if old is not None and not isinstance(old, sequence):
            raise DistutilsSetupError(
                name + ": this setting cannot be changed via include/exclude"
            )
        elif old:
            # Keep only the items that were not named in 'value'.
            setattr(self, name, [item for item in old if item not in value])
+
    def _include_misc(self, name, value):
        """Handle 'include()' for list/tuple attrs without a special handler"""

        if not isinstance(value, sequence):
            raise DistutilsSetupError(
                "%s: setting must be a list (%r)" % (name, value)
            )
        try:
            old = getattr(self, name)
        except AttributeError:
            raise DistutilsSetupError(
                "%s: No such distribution setting" % name
            )
        if old is None:
            # Attribute not set yet: adopt the new value wholesale.
            setattr(self, name, value)
        elif not isinstance(old, sequence):
            raise DistutilsSetupError(
                name + ": this setting cannot be changed via include/exclude"
            )
        else:
            # Append only the items not already present (order-preserving).
            new = [item for item in value if item not in old]
            setattr(self, name, old + new)
+
    def exclude(self, **attrs):
        """Remove items from distribution that are named in keyword arguments

        For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
        the distribution's 'py_modules' attribute.  Excluding packages uses
        the 'exclude_package()' method, so all of the package's contained
        packages, modules, and extensions are also excluded.

        Currently, this method only supports exclusion from attributes that are
        lists or tuples.  If you need to add support for excluding from other
        attributes in this or a subclass, you can add an '_exclude_X' method,
        where 'X' is the name of the attribute.  The method will be called with
        the value passed to 'exclude()'.  So, 'dist.exclude(foo={"bar":"baz"})'
        will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
        handle whatever special exclusion logic is needed.
        """
        for k, v in attrs.items():
            # Dispatch to a per-attribute handler when one exists.
            exclude = getattr(self, '_exclude_' + k, None)
            if exclude:
                exclude(v)
            else:
                self._exclude_misc(k, v)
+
+    def _exclude_packages(self, packages):
+        if not isinstance(packages, sequence):
+            raise DistutilsSetupError(
+                "packages: setting must be a list or tuple (%r)" % (packages,)
+            )
+        list(map(self.exclude_package, packages))
+
+    def _parse_command_opts(self, parser, args):
+        # Remove --with-X/--without-X options when processing command args
+        self.global_options = self.__class__.global_options
+        self.negative_opt = self.__class__.negative_opt
+
+        # First, expand any aliases
+        command = args[0]
+        aliases = self.get_option_dict('aliases')
+        while command in aliases:
+            src, alias = aliases[command]
+            del aliases[command]  # ensure each alias can expand only once!
+            import shlex
+            args[:1] = shlex.split(alias, True)
+            command = args[0]
+
+        nargs = _Distribution._parse_command_opts(self, parser, args)
+
+        # Handle commands that want to consume all remaining arguments
+        cmd_class = self.get_command_class(command)
+        if getattr(cmd_class, 'command_consumes_arguments', None):
+            self.get_option_dict(command)['args'] = ("command line", nargs)
+            if nargs is not None:
+                return []
+
+        return nargs
+
    def get_cmdline_options(self):
        """Return a '{cmd: {opt:val}}' map of all command-line options

        Option names are all long, but do not include the leading '--', and
        contain dashes rather than underscores.  If the option doesn't take
        an argument (e.g. '--quiet'), the 'val' is 'None'.

        Note that options provided by config files are intentionally excluded.
        """

        d = {}

        for cmd, opts in self.command_options.items():

            for opt, (src, val) in opts.items():

                if src != "command line":
                    continue

                opt = opt.replace('_', '-')

                if val == 0:
                    # A false value came from a negative option; map it back
                    # to the '--no-X'-style flag the user actually typed.
                    cmdobj = self.get_command_obj(cmd)
                    neg_opt = self.negative_opt.copy()
                    neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
                    for neg, pos in neg_opt.items():
                        if pos == opt:
                            opt = neg
                            val = None
                            break
                    else:
                        # A 0 value with no registered negative form should
                        # be impossible for command-line-sourced options.
                        raise AssertionError("Shouldn't be able to get here")

                elif val == 1:
                    # Flag options are stored as 1; report as argument-less.
                    val = None

                d.setdefault(cmd, {})[opt] = val

        return d
+
+    def iter_distribution_names(self):
+        """Yield all packages, modules, and extension names in distribution"""
+
+        for pkg in self.packages or ():
+            yield pkg
+
+        for module in self.py_modules or ():
+            yield module
+
+        for ext in self.ext_modules or ():
+            if isinstance(ext, tuple):
+                name, buildinfo = ext
+            else:
+                name = ext.name
+            if name.endswith('module'):
+                name = name[:-6]
+            yield name
+
    def handle_display_options(self, option_order):
        """If there were any non-global "display-only" options
        (--help-commands or the metadata display options) on the command
        line, display the requested info and return true; else return
        false.

        On Python 3, temporarily rebinds sys.stdout to a UTF-8 wrapper so
        metadata prints consistently regardless of platform encoding.
        """
        import sys

        # Python 2 and --help-commands take the stock code path unchanged.
        if six.PY2 or self.help_commands:
            return _Distribution.handle_display_options(self, option_order)

        # Stdout may be StringIO (e.g. in tests)
        import io
        if not isinstance(sys.stdout, io.TextIOWrapper):
            return _Distribution.handle_display_options(self, option_order)

        # Don't wrap stdout if utf-8 is already the encoding. Provides
        #  workaround for #334.
        if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
            return _Distribution.handle_display_options(self, option_order)

        # Print metadata in UTF-8 no matter the platform
        encoding = sys.stdout.encoding
        errors = sys.stdout.errors
        newline = sys.platform != 'win32' and '\n' or None
        line_buffering = sys.stdout.line_buffering

        sys.stdout = io.TextIOWrapper(
            sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
        try:
            return _Distribution.handle_display_options(self, option_order)
        finally:
            # Rebuild stdout with its original encoding; detach() means the
            # temporary wrapper must not be used after this point.
            sys.stdout = io.TextIOWrapper(
                sys.stdout.detach(), encoding, errors, newline, line_buffering)
+
+
class Feature:
    """
    **deprecated** -- The `Feature` facility was never completely implemented
    or supported, `has reported issues
    <https://github.com/pypa/setuptools/issues/58>`_ and will be removed in
    a future version.

    A subset of the distribution that can be excluded if unneeded/wanted

    Features are created using these keyword arguments:

      'description' -- a short, human readable description of the feature, to
         be used in error messages, and option help messages.

      'standard' -- if true, the feature is included by default if it is
         available on the current system.  Otherwise, the feature is only
         included if requested via a command line '--with-X' option, or if
         another included feature requires it.  The default setting is 'False'.

      'available' -- if true, the feature is available for installation on the
         current system.  The default setting is 'True'.

      'optional' -- if true, the feature's inclusion can be controlled from the
         command line, using the '--with-X' or '--without-X' options.  If
         false, the feature's inclusion status is determined automatically,
         based on 'available', 'standard', and whether any other feature
         requires it.  The default setting is 'True'.

      'require_features' -- a string or sequence of strings naming features
         that should also be included if this feature is included.  Defaults to
         empty list.  May also contain 'Require' objects that should be
         added/removed from the distribution.

      'remove' -- a string or list of strings naming packages to be removed
         from the distribution if this feature is *not* included.  If the
         feature *is* included, this argument is ignored.  This argument exists
         to support removing features that "crosscut" a distribution, such as
         defining a 'tests' feature that removes all the 'tests' subpackages
         provided by other features.  The default for this argument is an empty
         list.  (Note: the named package(s) or modules must exist in the base
         distribution when the 'setup()' function is initially called.)

      other keywords -- any other keyword arguments are saved, and passed to
         the distribution's 'include()' and 'exclude()' methods when the
         feature is included or excluded, respectively.  So, for example, you
         could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
         added or removed from the distribution as appropriate.

    A feature must include at least one 'requires', 'remove', or other
    keyword argument.  Otherwise, it can't affect the distribution in any way.
    Note also that you can subclass 'Feature' to create your own specialized
    feature types that modify the distribution in other ways when included or
    excluded.  See the docstrings for the various methods here for more detail.
    Aside from the methods, the only feature attributes that distributions look
    at are 'description' and 'optional'.
    """

    @staticmethod
    def warn_deprecated():
        msg = (
            "Features are deprecated and will be removed in a future "
            "version. See https://github.com/pypa/setuptools/issues/65."
        )
        warnings.warn(msg, DeprecationWarning, stacklevel=3)

    def __init__(
            self, description, standard=False, available=True,
            optional=True, require_features=(), remove=(), **extras):
        self.warn_deprecated()

        self.description = description
        self.standard = standard
        self.available = available
        self.optional = optional
        # Normalize a bare string/Require into a one-element tuple.
        if isinstance(require_features, (str, Require)):
            require_features = require_features,

        # Plain strings name other features; Require objects are routed
        # through 'extras' so include()/exclude() see them.
        self.require_features = [
            r for r in require_features if isinstance(r, str)
        ]
        er = [r for r in require_features if not isinstance(r, str)]
        if er:
            extras['require_features'] = er

        if isinstance(remove, str):
            remove = remove,
        self.remove = remove
        self.extras = extras

        if not remove and not require_features and not extras:
            # Bug fix: the '%s' placeholder was previously never filled in,
            # so the error printed a literal '%s' instead of the feature name.
            raise DistutilsSetupError(
                "Feature %s: must define 'require_features', 'remove', or "
                "at least one of 'packages', 'py_modules', etc."
                % self.description
            )

    def include_by_default(self):
        """Should this feature be included by default?"""
        return self.available and self.standard

    def include_in(self, dist):
        """Ensure feature and its requirements are included in distribution

        You may override this in a subclass to perform additional operations on
        the distribution.  Note that this method may be called more than once
        per feature, and so should be idempotent.

        """

        if not self.available:
            raise DistutilsPlatformError(
                self.description + " is required, "
                "but is not available on this platform"
            )

        dist.include(**self.extras)

        for f in self.require_features:
            dist.include_feature(f)

    def exclude_from(self, dist):
        """Ensure feature is excluded from distribution

        You may override this in a subclass to perform additional operations on
        the distribution.  This method will be called at most once per
        feature, and only after all included features have been asked to
        include themselves.
        """

        dist.exclude(**self.extras)

        if self.remove:
            for item in self.remove:
                dist.exclude_package(item)

    def validate(self, dist):
        """Verify that feature makes sense in context of distribution

        This method is called by the distribution just before it parses its
        command line.  It checks to ensure that the 'remove' attribute, if any,
        contains only valid package/module names that are present in the base
        distribution when 'setup()' is called.  You may override it in a
        subclass to perform any other required validation of the feature
        against a target distribution.
        """

        for item in self.remove:
            if not dist.has_contents_for(item):
                raise DistutilsSetupError(
                    "%s wants to be able to remove %s, but the distribution"
                    " doesn't contain any packages or modules under %s"
                    % (self.description, item, item)
                )
diff --git a/setuptools/extension.py b/setuptools/extension.py
new file mode 100644
index 0000000..2946889
--- /dev/null
+++ b/setuptools/extension.py
@@ -0,0 +1,57 @@
+import re
+import functools
+import distutils.core
+import distutils.errors
+import distutils.extension
+
+from setuptools.extern.six.moves import map
+
+from .monkey import get_unpatched
+
+
+def _have_cython():
+    """
+    Return True if Cython can be imported.
+    """
+    cython_impl = 'Cython.Distutils.build_ext'
+    try:
+        # from (cython_impl) import build_ext
+        __import__(cython_impl, fromlist=['build_ext']).build_ext
+        return True
+    except Exception:
+        pass
+    return False
+
+
# for compatibility: Pyrex was Cython's predecessor, old callers use this name
have_pyrex = _have_cython

# The pristine (un-monkeypatched) distutils Extension class.
_Extension = get_unpatched(distutils.core.Extension)
+
+
class Extension(_Extension):
    """Extension that uses '.c' files in place of '.pyx' files"""

    def __init__(self, name, sources, *args, **kw):
        # The *args is needed for compatibility as calls may use positional
        # arguments. py_limited_api may be set only via keyword.
        self.py_limited_api = kw.pop("py_limited_api", False)
        _Extension.__init__(self, name, sources, *args, **kw)

    def _convert_pyx_sources_to_lang(self):
        """
        Replace sources with .pyx extensions to sources with the target
        language extension. This mechanism allows language authors to supply
        pre-converted sources but to prefer the .pyx sources.
        """
        if _have_cython():
            # the build has Cython, so allow it to compile the .pyx files
            return
        lang = self.language or ''
        target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
        # Bug fix: the dot must be escaped.  The previous pattern '.pyx$'
        # treated it as a regex wildcard, so a name like 'foopyx' would
        # have been mangled to 'fo.c'.
        sub = functools.partial(re.sub, r'\.pyx$', target_ext)
        self.sources = list(map(sub, self.sources))
+
+
class Library(Extension):
    """Just like a regular Extension, but built as a library instead"""
    # NOTE(review): marker subclass with no extra behavior here; presumably
    # build commands check isinstance(..., Library) to alter linking -- the
    # consuming code is not visible in this module, confirm before relying.
diff --git a/setuptools/extern/__init__.py b/setuptools/extern/__init__.py
new file mode 100644
index 0000000..da3d668
--- /dev/null
+++ b/setuptools/extern/__init__.py
@@ -0,0 +1,73 @@
+import sys
+
+
class VendorImporter:
    """
    A PEP 302 meta path importer for finding optionally-vendored
    or otherwise naturally-installed packages from root_name.
    """

    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
        # root_name: package that hosts this importer (e.g. 'setuptools.extern')
        # vendored_names: top-level names this importer serves
        # vendor_pkg: where the vendored copies live; derived by convention
        # ('...extern' -> '..._vendor') when not given explicitly.
        self.root_name = root_name
        self.vendored_names = set(vendored_names)
        self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')

    @property
    def search_path(self):
        """
        Search first the vendor package then as a natural package.
        """
        yield self.vendor_pkg + '.'
        yield ''

    def find_module(self, fullname, path=None):
        """
        Return self when fullname starts with root_name and the
        target module is one vendored through this importer.
        """
        # partition() puts a non-empty head in 'root' only when fullname does
        # NOT start with 'root_name.'; in that case this importer declines.
        root, base, target = fullname.partition(self.root_name + '.')
        if root:
            return
        if not any(map(target.startswith, self.vendored_names)):
            return
        return self

    def load_module(self, fullname):
        """
        Iterate over the search path to locate and load fullname.
        """
        root, base, target = fullname.partition(self.root_name + '.')
        for prefix in self.search_path:
            try:
                # Try the vendored location first, then the plain name.
                extant = prefix + target
                __import__(extant)
                mod = sys.modules[extant]
                sys.modules[fullname] = mod
                # mysterious hack:
                # Remove the reference to the extant package/module
                # on later Python versions to cause relative imports
                # in the vendor package to resolve the same modules
                # as those going through this importer.
                if sys.version_info > (3, 3):
                    del sys.modules[extant]
                return mod
            except ImportError:
                pass
        else:
            # Neither the vendored nor the natural location had the package.
            raise ImportError(
                "The '{target}' package is required; "
                "normally this is bundled with this package so if you get "
                "this warning, consult the packager of your "
                "distribution.".format(**locals())
            )

    def install(self):
        """
        Install this importer into sys.meta_path if not already present.
        """
        if self not in sys.meta_path:
            sys.meta_path.append(self)
+
+
# Vendored top-level packages served by this importer; installing it makes
# e.g. 'setuptools.extern.six' resolve to 'setuptools._vendor.six'.
names = 'six', 'packaging', 'pyparsing',
VendorImporter(__name__, names, 'setuptools._vendor').install()
diff --git a/setuptools/glibc.py b/setuptools/glibc.py
new file mode 100644
index 0000000..a134591
--- /dev/null
+++ b/setuptools/glibc.py
@@ -0,0 +1,86 @@
+# This file originally from pip:
+# https://github.com/pypa/pip/blob/8f4f15a5a95d7d5b511ceaee9ed261176c181970/src/pip/_internal/utils/glibc.py
+from __future__ import absolute_import
+
+import ctypes
+import re
+import warnings
+
+
def glibc_version_string():
    "Returns glibc version string, or None if not using glibc."

    # ctypes.CDLL(None) internally calls dlopen(NULL): per the dlopen
    # manpage, a NULL filename yields a handle for the main program, so the
    # dynamic linker tells us which libc this process actually runs against.
    process_namespace = ctypes.CDLL(None)
    version_func = getattr(process_namespace, "gnu_get_libc_version", None)
    if version_func is None:
        # Symbol doesn't exist -> therefore, we are not linked to glibc.
        return None

    # gnu_get_libc_version() returns a C string like "2.5".
    version_func.restype = ctypes.c_char_p
    version = version_func()
    # py2 / py3 compatibility: decode bytes into str where necessary.
    if not isinstance(version, str):
        version = version.decode("ascii")
    return version
+
+
# Separated out from have_compatible_glibc for easier unit testing
def check_glibc_version(version_str, required_major, minimum_minor):
    """Return True when version_str denotes glibc of the required major
    series with at least the minimum minor version.
    """
    # A regexp rather than str.split: discard any random junk after the
    # minor version, as seen in patched/forked glibcs (e.g. Linaro's
    # "2.20-2014.11").  See gh-3588.
    match = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
    if match is None:
        warnings.warn("Expected glibc version with 2 components major.minor,"
                      " got: %s" % version_str, RuntimeWarning)
        return False
    major = int(match.group("major"))
    minor = int(match.group("minor"))
    return major == required_major and minor >= minimum_minor
+
+
def have_compatible_glibc(required_major, minimum_minor):
    """True when the running libc is glibc and at least the given version."""
    version_str = glibc_version_string()
    # Non-glibc systems (None) short-circuit to False.
    return (version_str is not None and
            check_glibc_version(version_str, required_major, minimum_minor))
+
+
+# platform.libc_ver regularly returns completely nonsensical glibc
+# versions. E.g. on my computer, platform says:
+#
+#   ~$ python2.7 -c 'import platform; print(platform.libc_ver())'
+#   ('glibc', '2.7')
+#   ~$ python3.5 -c 'import platform; print(platform.libc_ver())'
+#   ('glibc', '2.9')
+#
+# But the truth is:
+#
+#   ~$ ldd --version
+#   ldd (Debian GLIBC 2.22-11) 2.22
+#
+# This is unfortunate, because it means that the linehaul data on libc
+# versions that was generated by pip 8.1.2 and earlier is useless and
+# misleading. Solution: instead of using platform, use our code that actually
+# works.
def libc_ver():
    """Try to determine the glibc version

    Returns a tuple of strings (lib, version) which default to empty strings
    in case the lookup fails.
    """
    version = glibc_version_string()
    if version is None:
        return ("", "")
    return ("glibc", version)
diff --git a/setuptools/glob.py b/setuptools/glob.py
new file mode 100644
index 0000000..6c781de
--- /dev/null
+++ b/setuptools/glob.py
@@ -0,0 +1,176 @@
+"""
+Filename globbing utility. Mostly a copy of `glob` from Python 3.5.
+
+Changes include:
+ * `yield from` and PEP3102 `*` removed.
+ * `bytes` changed to `six.binary_type`.
+ * Hidden files are not ignored.
+"""
+
+import os
+import re
+import fnmatch
+from setuptools.extern.six import binary_type
+
+__all__ = ["glob", "iglob", "escape"]
+
+
def glob(pathname, recursive=False):
    """Return a list of paths matching a pathname pattern.

    The pattern may contain simple shell-style wildcards a la
    fnmatch. However, unlike fnmatch, filenames starting with a
    dot are special cases that are not matched by '*' and '?'
    patterns.

    If recursive is true, the pattern '**' will match any files and
    zero or more directories and subdirectories.
    """
    # Eagerly consume the lazy iterator produced by iglob().
    return list(iglob(pathname, recursive=recursive))
+
+
def iglob(pathname, recursive=False):
    """Return an iterator which yields the paths matching a pathname pattern.

    The pattern may contain simple shell-style wildcards a la
    fnmatch. However, unlike fnmatch, filenames starting with a
    dot are special cases that are not matched by '*' and '?'
    patterns.

    If recursive is true, the pattern '**' will match any files and
    zero or more directories and subdirectories.
    """
    it = _iglob(pathname, recursive)
    if recursive and _isrecursive(pathname):
        # glob2() yields the empty string (the root itself) first; drop it
        # so callers only ever see real paths.
        s = next(it)  # skip empty string
        assert not s
    return it
+
+
def _iglob(pathname, recursive):
    """Recursive workhorse behind iglob(); yields matching paths lazily."""
    dirname, basename = os.path.split(pathname)
    if not has_magic(pathname):
        # No wildcards at all: the pattern is a literal path.
        if basename:
            if os.path.lexists(pathname):
                yield pathname
        else:
            # Patterns ending with a slash should match only directories
            if os.path.isdir(dirname):
                yield pathname
        return
    if not dirname:
        # Pattern is a bare basename: glob directly in the current directory.
        if recursive and _isrecursive(basename):
            for x in glob2(dirname, basename):
                yield x
        else:
            for x in glob1(dirname, basename):
                yield x
        return
    # `os.path.split()` returns the argument itself as a dirname if it is a
    # drive or UNC path.  Prevent an infinite recursion if a drive or UNC path
    # contains magic characters (i.e. r'\\?\C:').
    if dirname != pathname and has_magic(dirname):
        dirs = _iglob(dirname, recursive)
    else:
        dirs = [dirname]
    # Pick the right per-directory matcher for the basename component.
    if has_magic(basename):
        if recursive and _isrecursive(basename):
            glob_in_dir = glob2
        else:
            glob_in_dir = glob1
    else:
        glob_in_dir = glob0
    for dirname in dirs:
        for name in glob_in_dir(dirname, basename):
            yield os.path.join(dirname, name)
+
+
+# These 2 helper functions non-recursively glob inside a literal directory.
+# They return a list of basenames. `glob1` accepts a pattern while `glob0`
+# takes a literal basename (so it only has to check for its existence).
+
+
def glob1(dirname, pattern):
    """Non-recursively match ``pattern`` against entries of ``dirname``.

    Returns a list of matching basenames; a missing or unreadable
    directory yields an empty list.
    """
    if not dirname:
        # Empty dirname means the current directory, in the pattern's type
        # (bytes patterns need a bytes directory).
        if isinstance(pattern, binary_type):
            dirname = os.curdir.encode('ASCII')
        else:
            dirname = os.curdir
    try:
        entries = os.listdir(dirname)
    except OSError:
        return []
    return fnmatch.filter(entries, pattern)
+
+
def glob0(dirname, basename):
    """Return [basename] if it exists inside ``dirname``, else [].

    An empty basename arises when the pattern ended with a separator
    ('q*x/'), and then matches only when ``dirname`` is a directory.
    """
    if basename:
        if os.path.lexists(os.path.join(dirname, basename)):
            return [basename]
    elif os.path.isdir(dirname):
        # `os.path.split()` returns an empty basename for paths ending with a
        # directory separator; such patterns should match only directories.
        return [basename]
    return []
+
+
+# This helper function recursively yields relative pathnames inside a literal
+# directory.
+
+
def glob2(dirname, pattern):
    # ``pattern`` must be the recursive marker '**'.  Yields the empty
    # string first (the directory itself, typed to match the pattern via
    # pattern[:0]), then every relative path beneath ``dirname``.
    assert _isrecursive(pattern)
    yield pattern[:0]
    for x in _rlistdir(dirname):
        yield x
+
+
# Recursively yields relative pathnames inside a literal directory.
def _rlistdir(dirname):
    if not dirname:
        # Empty dirname means the current directory, matching the argument's
        # type (bytes vs str) so the joins below stay homogeneous.
        if isinstance(dirname, binary_type):
            dirname = binary_type(os.curdir, 'ASCII')
        else:
            dirname = os.curdir
    try:
        names = os.listdir(dirname)
    except os.error:
        # Unreadable directory: yield nothing.
        return
    for x in names:
        yield x
        path = os.path.join(dirname, x) if dirname else x
        for y in _rlistdir(path):
            yield os.path.join(x, y)
+
+
# Any of these characters makes a name a glob pattern rather than a literal.
magic_check = re.compile('([*?[])')
magic_check_bytes = re.compile(b'([*?[])')
+
+
def has_magic(s):
    """Return True if ``s`` contains any glob metacharacters (*, ?, [)."""
    checker = magic_check_bytes if isinstance(s, binary_type) else magic_check
    return checker.search(s) is not None
+
+
def _isrecursive(pattern):
    """Return True if ``pattern`` is exactly the recursive marker '**'."""
    marker = b'**' if isinstance(pattern, binary_type) else '**'
    return pattern == marker
+
+
def escape(pathname):
    """Escape all special characters.

    Returns ``pathname`` with every glob metacharacter neutralized so the
    result matches the input literally when used as a pattern.
    """
    # Escaping is done by wrapping any of "*?[" between square brackets.
    # Metacharacters do not work in the drive part and shouldn't be escaped.
    drive, pathname = os.path.splitdrive(pathname)
    if isinstance(pathname, binary_type):
        pathname = magic_check_bytes.sub(br'[\1]', pathname)
    else:
        pathname = magic_check.sub(r'[\1]', pathname)
    return drive + pathname
diff --git a/setuptools/gui-32.exe b/setuptools/gui-32.exe
new file mode 100644
index 0000000..f8d3509
--- /dev/null
+++ b/setuptools/gui-32.exe
Binary files differ
diff --git a/setuptools/gui-64.exe b/setuptools/gui-64.exe
new file mode 100644
index 0000000..330c51a
--- /dev/null
+++ b/setuptools/gui-64.exe
Binary files differ
diff --git a/setuptools/gui.exe b/setuptools/gui.exe
new file mode 100644
index 0000000..f8d3509
--- /dev/null
+++ b/setuptools/gui.exe
Binary files differ
diff --git a/setuptools/launch.py b/setuptools/launch.py
new file mode 100644
index 0000000..308283e
--- /dev/null
+++ b/setuptools/launch.py
@@ -0,0 +1,35 @@
+"""
+Launch the Python script on the command line after
+setuptools is bootstrapped via import.
+"""
+
+# Note that setuptools gets imported implicitly by the
+# invocation of this script using python -m setuptools.launch
+
+import tokenize
+import sys
+
+
def run():
    """
    Run the script in sys.argv[1] as if it had
    been invoked naturally.
    """
    # NOTE(review): bare expression; appears intended only to force the
    # __builtins__ name to resolve before argv is rewritten -- confirm.
    __builtins__
    script_name = sys.argv[1]
    # Minimal globals so the target script believes it is __main__.
    namespace = dict(
        __file__=script_name,
        __name__='__main__',
        __doc__=None,
    )
    # Shift argv left so the script sees its own name in sys.argv[0].
    sys.argv[:] = sys.argv[1:]

    # tokenize.open (Python 3) honors PEP 263 encoding declarations;
    # fall back to the builtin open where it does not exist.
    open_ = getattr(tokenize, 'open', open)
    script = open_(script_name).read()
    # NOTE(review): this replaces the literal four-character text
    # "\r\n" (backslash sequences), NOT CRLF line endings -- tokenize.open
    # already translates newlines.  Confirm this is intentional.
    norm_script = script.replace('\\r\\n', '\\n')
    code = compile(norm_script, script_name, 'exec')
    exec(code, namespace)


if __name__ == '__main__':
    run()
diff --git a/setuptools/lib2to3_ex.py b/setuptools/lib2to3_ex.py
new file mode 100644
index 0000000..4b1a73f
--- /dev/null
+++ b/setuptools/lib2to3_ex.py
@@ -0,0 +1,62 @@
+"""
+Customized Mixin2to3 support:
+
+ - adds support for converting doctests
+
+
+This module raises an ImportError on Python 2.
+"""
+
+from distutils.util import Mixin2to3 as _Mixin2to3
+from distutils import log
+from lib2to3.refactor import RefactoringTool, get_fixers_from_package
+
+import setuptools
+
+
class DistutilsRefactoringTool(RefactoringTool):
    """A RefactoringTool whose output is routed through distutils' logger."""

    def log_debug(self, msg, *args):
        log.debug(msg, *args)

    def log_message(self, msg, *args):
        log.info(msg, *args)

    def log_error(self, msg, *args, **kw):
        # RefactoringTool may pass extra keyword context; distutils.log
        # has no use for it, so it is dropped deliberately.
        log.error(msg, *args)
+
+
class Mixin2to3(_Mixin2to3):
    """distutils Mixin2to3 extended with the setuptools options
    ``use_2to3``, ``use_2to3_fixers`` and ``use_2to3_exclude_fixers``,
    plus optional conversion of doctests."""

    def run_2to3(self, files, doctests=False):
        # Only act when the distribution explicitly opted in and there is
        # actually something to convert.
        if self.distribution.use_2to3 is not True or not files:
            return
        log.info("Fixing " + " ".join(files))
        self.__collect_fixer_names()
        self.__drop_excluded_fixers()
        if doctests:
            if setuptools.run_2to3_on_doctests:
                tool = DistutilsRefactoringTool(self.fixer_names)
                tool.refactor(files, write=True, doctests_only=True)
        else:
            _Mixin2to3.run_2to3(self, files)

    def __collect_fixer_names(self):
        # Compute the fixer list once; subsequent calls reuse it.
        if self.fixer_names:
            return
        self.fixer_names = []
        packages = list(setuptools.lib2to3_fixer_packages)
        if self.distribution.use_2to3_fixers is not None:
            packages.extend(self.distribution.use_2to3_fixers)
        for package in packages:
            self.fixer_names.extend(get_fixers_from_package(package))

    def __drop_excluded_fixers(self):
        excluded = getattr(self, 'exclude_fixers', [])
        if self.distribution.use_2to3_exclude_fixers is not None:
            excluded.extend(self.distribution.use_2to3_exclude_fixers)
        for name in excluded:
            if name in self.fixer_names:
                self.fixer_names.remove(name)
diff --git a/setuptools/monkey.py b/setuptools/monkey.py
new file mode 100644
index 0000000..08ed50d
--- /dev/null
+++ b/setuptools/monkey.py
@@ -0,0 +1,181 @@
+"""
+Monkey patching of distutils.
+"""
+
+import sys
+import distutils.filelist
+import platform
+import types
+import functools
+from importlib import import_module
+import inspect
+
+from setuptools.extern import six
+
+import setuptools
+
+__all__ = []
+"""
+Everything is private. Contact the project team
+if you think you need this functionality.
+"""
+
+
+def _get_mro(cls):
+    """
+    Returns the bases classes for cls sorted by the MRO.
+
+    Works around an issue on Jython where inspect.getmro will not return all
+    base classes if multiple classes share the same name. Instead, this
+    function will return a tuple containing the class itself, and the contents
+    of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024.
+    """
+    if platform.python_implementation() == "Jython":
+        return (cls,) + cls.__bases__
+    return inspect.getmro(cls)
+
+
def get_unpatched(item):
    """Return the original (pre-patch) object hidden behind *item*.

    Classes and functions are dispatched to their dedicated helpers;
    anything else yields None.
    """
    if isinstance(item, six.class_types):
        return get_unpatched_class(item)
    if isinstance(item, types.FunctionType):
        return get_unpatched_function(item)
    return None
+
+
def get_unpatched_class(cls):
    """Protect against re-patching the distutils if reloaded

    Also ensures that no other distutils extension monkeypatched the
    distutils first.
    """
    # Walk the MRO and take the first class not defined by setuptools;
    # it must come straight from distutils, otherwise some other
    # extension patched distutils before we did.
    for base in _get_mro(cls):
        if base.__module__.startswith('setuptools'):
            continue
        if not base.__module__.startswith('distutils'):
            raise AssertionError(
                "distutils has already been patched by %r" % cls)
        return base
+
+
def patch_all():
    """Apply every distutils monkeypatch setuptools ships with:
    Command/Distribution/Extension replacements, the findall fix,
    the Warehouse upload URL, metadata writing and the MSVC hooks."""
    # we can't patch distutils.cmd, alas
    distutils.core.Command = setuptools.Command

    # True on every Python up to and including 3.5.3 (note: the
    # comparison also matches all of Python 2).
    has_issue_12885 = sys.version_info <= (3, 5, 3)

    if has_issue_12885:
        # fix findall bug in distutils (http://bugs.python.org/issue12885)
        distutils.filelist.findall = setuptools.findall

    # Interpreters whose bundled distutils predates the switch to the
    # Warehouse upload endpoint.
    needs_warehouse = (
        sys.version_info < (2, 7, 13)
        or
        (3, 0) < sys.version_info < (3, 3, 7)
        or
        (3, 4) < sys.version_info < (3, 4, 6)
        or
        (3, 5) < sys.version_info <= (3, 5, 3)
    )

    if needs_warehouse:
        warehouse = 'https://upload.pypi.org/legacy/'
        distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse

    _patch_distribution_metadata_write_pkg_file()

    # Install Distribution throughout the distutils
    for module in distutils.dist, distutils.core, distutils.cmd:
        module.Distribution = setuptools.dist.Distribution

    # Install the patched Extension
    distutils.core.Extension = setuptools.extension.Extension
    distutils.extension.Extension = setuptools.extension.Extension
    if 'distutils.command.build_ext' in sys.modules:
        # The module is already imported; patch its binding as well.
        sys.modules['distutils.command.build_ext'].Extension = (
            setuptools.extension.Extension
        )

    patch_for_msvc_specialized_compiler()
+
+
def _patch_distribution_metadata_write_pkg_file():
    """Patch write_pkg_file to also write Requires-Python/Requires-External"""
    setattr(
        distutils.dist.DistributionMetadata,
        'write_pkg_file',
        setuptools.dist.write_pkg_file,
    )
+
+
def patch_func(replacement, target_mod, func_name):
    """
    Patch func_name in target_mod with replacement

    Important - original must be resolved by name to avoid
    patching an already patched function.
    """
    original = getattr(target_mod, func_name)

    # Remember the original on the replacement (first patch wins) so that
    # get_unpatched_function can recover it later.
    if 'unpatched' not in vars(replacement):
        replacement.unpatched = original

    setattr(target_mod, func_name, replacement)
+
+
def get_unpatched_function(candidate):
    """Return the original function stashed on *candidate* by patch_func.

    Raises AttributeError when *candidate* was never patched.
    """
    return candidate.unpatched
+
+
def patch_for_msvc_specialized_compiler():
    """
    Patch functions in distutils to use standalone Microsoft Visual C++
    compilers.
    """
    # import late to avoid circular imports on Python < 3.5
    msvc = import_module('setuptools.msvc')

    if platform.system() != 'Windows':
        # These compilers only exist on Microsoft Windows.
        return

    def patch_params(mod_name, func_name):
        """
        Prepare the parameters for patch_func to patch indicated function.
        """
        repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
        repl_name = repl_prefix + func_name.lstrip('_')
        repl = getattr(msvc, repl_name)
        mod = import_module(mod_name)
        if not hasattr(mod, func_name):
            raise ImportError(func_name)
        return repl, mod, func_name

    # Each group is optional as a unit: the distutils module involved may
    # not exist on this Python, in which case the whole group is skipped.
    patch_groups = (
        # Python 2.7 to 3.4: distutils.msvc9compiler
        (('distutils.msvc9compiler', 'find_vcvarsall'),
         ('distutils.msvc9compiler', 'query_vcvarsall')),
        # Python 3.5+: distutils._msvccompiler
        (('distutils._msvccompiler', '_get_vc_env'),),
        # distutils._msvccompiler.gen_lib_options, for Numpy < 1.11.2
        (('distutils._msvccompiler', 'gen_lib_options'),),
    )
    for group in patch_groups:
        try:
            for mod_name, func_name in group:
                patch_func(*patch_params(mod_name, func_name))
        except ImportError:
            pass
diff --git a/setuptools/msvc.py b/setuptools/msvc.py
new file mode 100644
index 0000000..5e20b3f
--- /dev/null
+++ b/setuptools/msvc.py
@@ -0,0 +1,1302 @@
+"""
+Improved support for Microsoft Visual C++ compilers.
+
+Known supported compilers:
+--------------------------
+Microsoft Visual C++ 9.0:
+    Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)
+    Microsoft Windows SDK 6.1 (x86, x64, ia64)
+    Microsoft Windows SDK 7.0 (x86, x64, ia64)
+
+Microsoft Visual C++ 10.0:
+    Microsoft Windows SDK 7.1 (x86, x64, ia64)
+
+Microsoft Visual C++ 14.0:
+    Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
+    Microsoft Visual Studio 2017 (x86, x64, arm, arm64)
+    Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64)
+"""
+
+import os
+import sys
+import platform
+import itertools
+import distutils.errors
+from setuptools.extern.packaging.version import LegacyVersion
+
+from setuptools.extern.six.moves import filterfalse
+
+from .monkey import get_unpatched
+
if platform.system() == 'Windows':
    from setuptools.extern.six.moves import winreg
    safe_env = os.environ
else:
    """
    Mock winreg and environ so the module can be imported
    on this platform.
    """

    class winreg:
        # Dummy registry hives so RegistryInfo can still be instantiated
        # off-Windows; every lookup against them simply fails.
        HKEY_USERS = None
        HKEY_CURRENT_USER = None
        HKEY_LOCAL_MACHINE = None
        HKEY_CLASSES_ROOT = None

    # Empty stand-in for os.environ: every .get() falls back to defaults.
    safe_env = dict()
+
# Exception types that may be raised merely by importing
# distutils.msvc9compiler; either one means "MSVC 9 support unavailable".
_msvc9_suppress_errors = (
    # msvc9compiler isn't available on some platforms
    ImportError,

    # msvc9compiler raises DistutilsPlatformError in some
    # environments. See #1118.
    distutils.errors.DistutilsPlatformError,
)

try:
    from distutils.msvc9compiler import Reg
except _msvc9_suppress_errors:
    # NOTE(review): Reg is left undefined here; msvc9_find_vcvarsall
    # would raise NameError if ever called on such a platform.
    pass
+
+
def msvc9_find_vcvarsall(version):
    """
    Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone
    compiler build for Python (VCForPython). Fall back to original behavior
    when the standalone compiler is not available.

    Redirect the path of "vcvarsall.bat".

    Known supported compilers
    -------------------------
    Microsoft Visual C++ 9.0:
        Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)

    Parameters
    ----------
    version: float
        Required Microsoft Visual C++ version.

    Return
    ------
    vcvarsall.bat path: str
    """
    VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
    key = VC_BASE % ('', version)
    try:
        # Per-user installs register the compiler path here
        productdir = Reg.get_value(key, "installdir")
    except KeyError:
        try:
            # All-user installs on a 64-bit system register here
            key = VC_BASE % ('Wow6432Node\\', version)
            productdir = Reg.get_value(key, "installdir")
        except KeyError:
            productdir = None

    if productdir:
        # FIX: was "os.path.os.path.join" -- that happened to resolve to
        # the same function (via the path module's internal `os` attribute)
        # but only by accident; use os.path.join directly.
        vcvarsall = os.path.join(productdir, "vcvarsall.bat")
        if os.path.isfile(vcvarsall):
            return vcvarsall

    # Nothing usable registered: defer to the original implementation.
    return get_unpatched(msvc9_find_vcvarsall)(version)
+
+
def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs):
    """
    Patched "distutils.msvc9compiler.query_vcvarsall" supporting extra
    compilers (MSVC 9.0 standalone / Windows SDK 6.1, 7.0, 7.1).

    Builds the environment without "vcvarsall.bat" when the classic
    lookup fails.

    Parameters
    ----------
    ver: float
        Required Microsoft Visual C++ version.
    arch: str
        Target architecture.

    Return
    ------
    environment: dict
    """
    # First chance: the stock implementation, driven by vcvarsall.bat.
    unpatched = get_unpatched(msvc9_query_vcvarsall)
    try:
        return unpatched(ver, arch, *args, **kwargs)
    except (distutils.errors.DistutilsPlatformError, ValueError):
        # Either vcvarsall.bat is missing, or it ran without setting up
        # the environment; compute the environment ourselves instead.
        pass

    try:
        return EnvironmentInfo(arch, ver).return_env()
    except distutils.errors.DistutilsPlatformError as exc:
        # Make the failure actionable before re-raising.
        _augment_exception(exc, ver, arch)
        raise
+
+
def msvc14_get_vc_env(plat_spec):
    """
    Patched "distutils._msvccompiler._get_vc_env" supporting extra
    compilers (VC++ Build Tools 2015, Visual Studio 2017 and its Build
    Tools).

    Builds the environment without "vcvarsall.bat" when the classic
    lookup fails.

    Parameters
    ----------
    plat_spec: str
        Target architecture.

    Return
    ------
    environment: dict
    """
    # First chance: the stock implementation, driven by vcvarsall.bat.
    try:
        return get_unpatched(msvc14_get_vc_env)(plat_spec)
    except distutils.errors.DistutilsPlatformError:
        # vcvarsall.bat is missing; fall back to direct setup.
        pass

    try:
        return EnvironmentInfo(plat_spec, vc_min_ver=14.0).return_env()
    except distutils.errors.DistutilsPlatformError as exc:
        # Make the failure actionable before re-raising.
        _augment_exception(exc, 14.0)
        raise
+
+
def msvc14_gen_lib_options(*args, **kwargs):
    """
    Patched "distutils._msvccompiler.gen_lib_options" for fix
    compatibility between "numpy.distutils" and "distutils._msvccompiler"
    (for Numpy < 1.11.2)
    """
    numpy_loaded = "numpy.distutils" in sys.modules
    if numpy_loaded:
        import numpy as np
        old_numpy = LegacyVersion(np.__version__) < LegacyVersion('1.11.2')
        if old_numpy:
            # Old numpy monkeypatched distutils incompatibly; use its own
            # implementation instead.
            return np.distutils.ccompiler.gen_lib_options(*args, **kwargs)
    return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs)
+
+
+def _augment_exception(exc, version, arch=''):
+    """
+    Add details to the exception message to help guide the user
+    as to what action will resolve it.
+    """
+    # Error if MSVC++ directory not found or environment not set
+    message = exc.args[0]
+
+    if "vcvarsall" in message.lower() or "visual c" in message.lower():
+        # Special error message if MSVC++ not installed
+        tmpl = 'Microsoft Visual C++ {version:0.1f} is required.'
+        message = tmpl.format(**locals())
+        msdownload = 'www.microsoft.com/download/details.aspx?id=%d'
+        if version == 9.0:
+            if arch.lower().find('ia64') > -1:
+                # For VC++ 9.0, if IA64 support is needed, redirect user
+                # to Windows SDK 7.0
+                message += ' Get it with "Microsoft Windows SDK 7.0": '
+                message += msdownload % 3138
+            else:
+                # For VC++ 9.0 redirect user to Vc++ for Python 2.7 :
+                # This redirection link is maintained by Microsoft.
+                # Contact vspython@microsoft.com if it needs updating.
+                message += ' Get it from http://aka.ms/vcpython27'
+        elif version == 10.0:
+            # For VC++ 10.0 Redirect user to Windows SDK 7.1
+            message += ' Get it with "Microsoft Windows SDK 7.1": '
+            message += msdownload % 8279
+        elif version >= 14.0:
+            # For VC++ 14.0 Redirect user to Visual C++ Build Tools
+            message += (' Get it with "Microsoft Visual C++ Build Tools": '
+                        r'http://landinghub.visualstudio.com/'
+                        'visual-cpp-build-tools')
+
+    exc.args = (message, )
+
+
class PlatformInfo:
    r"""
    Current and target architecture information.

    Parameters
    ----------
    arch: str
        Target architecture, e.g. 'x86', 'x64'/'amd64' or a
        cross-compiler spec such as 'x86_amd64'.
    """
    # Architecture of the running host, from the PROCESSOR_ARCHITECTURE
    # environment variable ('' when unset, e.g. in the off-Windows mock).
    current_cpu = safe_env.get('processor_architecture', '').lower()

    def __init__(self, arch):
        # Normalize Microsoft's two spellings of 64-bit x86.
        self.arch = arch.lower().replace('x64', 'amd64')

    @property
    def target_cpu(self):
        # For a cross spec such as 'x86_amd64', the part after the first
        # underscore names the target; otherwise the whole arch string.
        return self.arch[self.arch.find('_') + 1:]

    def target_is_x86(self):
        return self.target_cpu == 'x86'

    def current_is_x86(self):
        return self.current_cpu == 'x86'

    def current_dir(self, hidex86=False, x64=False):
        r"""
        Subfolder for the current (host) architecture.

        Parameters
        ----------
        hidex86: bool
            return '' instead of '\x86' when the host is x86.
        x64: bool
            return '\x64' instead of '\amd64' when the host is amd64.

        Return
        ------
        subfolder: str
            '\<current>', or '' (see hidex86 parameter)
        """
        if hidex86 and self.current_cpu == 'x86':
            return ''
        if x64 and self.current_cpu == 'amd64':
            return r'\x64'
        return r'\%s' % self.current_cpu

    def target_dir(self, hidex86=False, x64=False):
        r"""
        Subfolder for the target architecture.

        Parameters
        ----------
        hidex86: bool
            return '' instead of '\x86' when the target is x86.
        x64: bool
            return '\x64' instead of '\amd64' when the target is amd64.

        Return
        ------
        subfolder: str
            '\<target>', or '' (see hidex86 parameter)
        """
        if hidex86 and self.target_cpu == 'x86':
            return ''
        if x64 and self.target_cpu == 'amd64':
            return r'\x64'
        return r'\%s' % self.target_cpu

    def cross_dir(self, forcex86=False):
        r"""
        Cross-compilation subfolder, e.g. '\x86_amd64'.

        Parameters
        ----------
        forcex86: bool
            Pretend the current architecture is x86 even when it is not.

        Return
        ------
        subfolder: str
            '' when the target equals the (possibly forced) current
            architecture, '\<current>_<target>' otherwise.
        """
        current = 'x86' if forcex86 else self.current_cpu
        if self.target_cpu == current:
            return ''
        return self.target_dir().replace('\\', '\\%s_' % current)
+
+
class RegistryInfo:
    """
    Microsoft Visual Studio related registry information.

    Parameters
    ----------
    platform_info: PlatformInfo
        "PlatformInfo" instance.
    """
    # Registry hives searched by lookup(), in priority order.
    HKEYS = (winreg.HKEY_USERS,
             winreg.HKEY_CURRENT_USER,
             winreg.HKEY_LOCAL_MACHINE,
             winreg.HKEY_CLASSES_ROOT)

    def __init__(self, platform_info):
        self.pi = platform_info

    @property
    def visualstudio(self):
        """
        Microsoft Visual Studio root registry key.
        """
        return 'VisualStudio'

    @property
    def sxs(self):
        """
        Microsoft Visual Studio SxS registry key.
        """
        return os.path.join(self.visualstudio, 'SxS')

    @property
    def vc(self):
        """
        Microsoft Visual C++ VC7 registry key.
        """
        return os.path.join(self.sxs, 'VC7')

    @property
    def vs(self):
        """
        Microsoft Visual Studio VS7 registry key.
        """
        return os.path.join(self.sxs, 'VS7')

    @property
    def vc_for_python(self):
        """
        Microsoft Visual C++ for Python registry key.
        """
        return r'DevDiv\VCForPython'

    @property
    def microsoft_sdk(self):
        """
        Microsoft SDK registry key.
        """
        return 'Microsoft SDKs'

    @property
    def windows_sdk(self):
        """
        Microsoft Windows/Platform SDK registry key.
        """
        return os.path.join(self.microsoft_sdk, 'Windows')

    @property
    def netfx_sdk(self):
        """
        Microsoft .NET Framework SDK registry key.
        """
        return os.path.join(self.microsoft_sdk, 'NETFXSDK')

    @property
    def windows_kits_roots(self):
        """
        Microsoft Windows Kits Roots registry key.
        """
        return r'Windows Kits\Installed Roots'

    def microsoft(self, key, x86=False):
        """
        Return key in Microsoft software registry.

        Parameters
        ----------
        key: str
            Registry key path where look.
        x86: str
            Force x86 software registry.

        Return
        ------
        str: value
        """
        # On a 64-bit host, 32-bit software registers under Wow6432Node
        # unless the native (x86) view is explicitly requested.
        node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node'
        return os.path.join('Software', node64, 'Microsoft', key)

    def lookup(self, key, name):
        """
        Look for values in registry in Microsoft software registry.

        Parameters
        ----------
        key: str
            Registry key path where look.
        name: str
            Value name to find.

        Return
        ------
        str: value (implicitly None when the value is found in no hive)
        """
        KEY_READ = winreg.KEY_READ
        openkey = winreg.OpenKey
        ms = self.microsoft
        for hkey in self.HKEYS:
            try:
                bkey = openkey(hkey, ms(key), 0, KEY_READ)
            except (OSError, IOError):
                # On 64-bit hosts, retry against the 32-bit registry view
                # before giving up on this hive.
                if not self.pi.current_is_x86():
                    try:
                        bkey = openkey(hkey, ms(key, True), 0, KEY_READ)
                    except (OSError, IOError):
                        continue
                else:
                    continue
            try:
                return winreg.QueryValueEx(bkey, name)[0]
            except (OSError, IOError):
                pass
        # NOTE(review): falls through to an implicit None; callers rely on
        # `lookup(...) or default` chaining.
+
+
class SystemInfo:
    """
    Microsoft Windows and Visual Studio related system information.

    Parameters
    ----------
    registry_info: RegistryInfo
        "RegistryInfo" instance.
    vc_ver: float
        Required Microsoft Visual C++ version.
    """

    # Variables and properties in this class use the original CamelCase
    # names from Microsoft source files to ease comparison.
    WinDir = safe_env.get('WinDir', '')
    ProgramFiles = safe_env.get('ProgramFiles', '')
    # 32-bit Windows has no "ProgramFiles(x86)"; fall back to ProgramFiles.
    ProgramFilesx86 = safe_env.get('ProgramFiles(x86)', ProgramFiles)

    def __init__(self, registry_info, vc_ver=None):
        self.ri = registry_info
        self.pi = self.ri.pi
        # Default to the newest VC++ version found in the registry.
        self.vc_ver = vc_ver or self._find_latest_available_vc_ver()
+
+    def _find_latest_available_vc_ver(self):
+        try:
+            return self.find_available_vc_vers()[-1]
+        except IndexError:
+            err = 'No Microsoft Visual C++ version found'
+            raise distutils.errors.DistutilsPlatformError(err)
+
    def find_available_vc_vers(self):
        """
        Find all available Microsoft Visual C++ versions.

        Returns a sorted (ascending) list of floats; may be empty.
        """
        ms = self.ri.microsoft
        vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs)
        vc_vers = []
        for hkey in self.ri.HKEYS:
            for key in vckeys:
                try:
                    bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ)
                except (OSError, IOError):
                    # Key absent in this hive; try the next one.
                    continue
                subkeys, values, _ = winreg.QueryInfoKey(bkey)
                # Version numbers can appear as value names ...
                for i in range(values):
                    try:
                        ver = float(winreg.EnumValue(bkey, i)[0])
                        if ver not in vc_vers:
                            vc_vers.append(ver)
                    except ValueError:
                        # Non-numeric entry; ignore.
                        pass
                # ... or as subkey names.
                for i in range(subkeys):
                    try:
                        ver = float(winreg.EnumKey(bkey, i))
                        if ver not in vc_vers:
                            vc_vers.append(ver)
                    except ValueError:
                        pass
        return sorted(vc_vers)
+
+    @property
+    def VSInstallDir(self):
+        """
+        Microsoft Visual Studio directory.
+        """
+        # Default path
+        name = 'Microsoft Visual Studio %0.1f' % self.vc_ver
+        default = os.path.join(self.ProgramFilesx86, name)
+
+        # Try to get path from registry, if fail use default path
+        return self.ri.lookup(self.ri.vs, '%0.1f' % self.vc_ver) or default
+
    @property
    def VCInstallDir(self):
        """
        Microsoft Visual C++ directory.

        Raises distutils.errors.DistutilsPlatformError when no plausible
        directory exists on disk.
        """
        # NOTE(review): bare attribute access with no visible effect beyond
        # evaluating the VSInstallDir property -- confirm intent.
        self.VSInstallDir

        guess_vc = self._guess_vc() or self._guess_vc_legacy()

        # Try to get "VC++ for Python" path from registry as default path
        reg_path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
        python_vc = self.ri.lookup(reg_path, 'installdir')
        default_vc = os.path.join(python_vc, 'VC') if python_vc else guess_vc

        # Try to get path from registry, if fail use default path
        path = self.ri.lookup(self.ri.vc, '%0.1f' % self.vc_ver) or default_vc

        if not os.path.isdir(path):
            msg = 'Microsoft Visual C++ directory not found'
            raise distutils.errors.DistutilsPlatformError(msg)

        return path
+
+    def _guess_vc(self):
+        """
+        Locate Visual C for 2017
+        """
+        if self.vc_ver <= 14.0:
+            return
+
+        default = r'VC\Tools\MSVC'
+        guess_vc = os.path.join(self.VSInstallDir, default)
+        # Subdir with VC exact version as name
+        try:
+            vc_exact_ver = os.listdir(guess_vc)[-1]
+            return os.path.join(guess_vc, vc_exact_ver)
+        except (OSError, IOError, IndexError):
+            pass
+
+    def _guess_vc_legacy(self):
+        """
+        Locate Visual C for versions prior to 2017
+        """
+        default = r'Microsoft Visual Studio %0.1f\VC' % self.vc_ver
+        return os.path.join(self.ProgramFilesx86, default)
+
+    @property
+    def WindowsSdkVersion(self):
+        """
+        Microsoft Windows SDK versions for specified MSVC++ version.
+        """
+        if self.vc_ver <= 9.0:
+            return ('7.0', '6.1', '6.0a')
+        elif self.vc_ver == 10.0:
+            return ('7.1', '7.0a')
+        elif self.vc_ver == 11.0:
+            return ('8.0', '8.0a')
+        elif self.vc_ver == 12.0:
+            return ('8.1', '8.1a')
+        elif self.vc_ver >= 14.0:
+            return ('10.0', '8.1')
+
+    @property
+    def WindowsSdkLastVersion(self):
+        """
+        Microsoft Windows SDK last version
+        """
+        return self._use_last_dir_name(os.path.join(
+            self.WindowsSdkDir, 'lib'))
+
    @property
    def WindowsSdkDir(self):
        """
        Microsoft Windows SDK directory, resolved by cascading fallbacks:
        registry, "VC++ for Python" WinSDK, new-style and old-style
        default paths, finally the VC PlatformSDK subdirectory.
        """
        sdkdir = ''
        for ver in self.WindowsSdkVersion:
            # Try to get it from registry
            loc = os.path.join(self.ri.windows_sdk, 'v%s' % ver)
            sdkdir = self.ri.lookup(loc, 'installationfolder')
            if sdkdir:
                break
        if not sdkdir or not os.path.isdir(sdkdir):
            # Try to get "VC++ for Python" version from registry
            path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
            install_base = self.ri.lookup(path, 'installdir')
            if install_base:
                sdkdir = os.path.join(install_base, 'WinSDK')
        if not sdkdir or not os.path.isdir(sdkdir):
            # If fail, use default new path
            for ver in self.WindowsSdkVersion:
                # '10.0' -> '10': Kits directories use the integer part.
                intver = ver[:ver.rfind('.')]
                path = r'Microsoft SDKs\Windows Kits\%s' % (intver)
                d = os.path.join(self.ProgramFiles, path)
                if os.path.isdir(d):
                    sdkdir = d
        if not sdkdir or not os.path.isdir(sdkdir):
            # If fail, use default old path
            for ver in self.WindowsSdkVersion:
                path = r'Microsoft SDKs\Windows\v%s' % ver
                d = os.path.join(self.ProgramFiles, path)
                if os.path.isdir(d):
                    sdkdir = d
        if not sdkdir:
            # If fail, use Platform SDK
            sdkdir = os.path.join(self.VCInstallDir, 'PlatformSDK')
        return sdkdir
+
    @property
    def WindowsSDKExecutablePath(self):
        """
        Microsoft Windows SDK executable directory (None when no
        matching registry entry is found).
        """
        # Find WinSDK NetFx Tools registry dir name
        if self.vc_ver <= 11.0:
            netfxver = 35
            arch = ''
        else:
            netfxver = 40
            hidex86 = True if self.vc_ver <= 12.0 else False
            arch = self.pi.current_dir(x64=True, hidex86=hidex86)
        fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-'))

        # list all possible registry paths
        regpaths = []
        if self.vc_ver >= 14.0:
            for ver in self.NetFxSdkVersion:
                regpaths += [os.path.join(self.ri.netfx_sdk, ver, fx)]

        for ver in self.WindowsSdkVersion:
            regpaths += [os.path.join(self.ri.windows_sdk, 'v%sA' % ver, fx)]

        # Return installation folder from the more recent path
        for path in regpaths:
            execpath = self.ri.lookup(path, 'installationfolder')
            if execpath:
                break
        # NOTE(review): when no lookup succeeds this returns the last
        # lookup result, i.e. None.
        return execpath
+
+    @property
+    def FSharpInstallDir(self):
+        """
+        Microsoft Visual F# directory.
+        """
+        path = r'%0.1f\Setup\F#' % self.vc_ver
+        path = os.path.join(self.ri.visualstudio, path)
+        return self.ri.lookup(path, 'productdir') or ''
+
+    @property
+    def UniversalCRTSdkDir(self):
+        """
+        Microsoft Universal CRT SDK directory.
+        """
+        # Set Kit Roots versions for specified MSVC++ version
+        if self.vc_ver >= 14.0:
+            vers = ('10', '81')
+        else:
+            vers = ()
+
+        # Find path of the more recent Kit
+        for ver in vers:
+            sdkdir = self.ri.lookup(self.ri.windows_kits_roots,
+                                    'kitsroot%s' % ver)
+            if sdkdir:
+                break
+        return sdkdir or ''
+
+    @property
+    def UniversalCRTSdkLastVersion(self):
+        """
+        Microsoft Universal C Runtime SDK last version
+        """
+        return self._use_last_dir_name(os.path.join(
+            self.UniversalCRTSdkDir, 'lib'))
+
+    @property
+    def NetFxSdkVersion(self):
+        """
+        Microsoft .NET Framework SDK versions.
+        """
+        # Set FxSdk versions for specified MSVC++ version
+        if self.vc_ver >= 14.0:
+            return ('4.6.1', '4.6')
+        else:
+            return ()
+
+    @property
+    def NetFxSdkDir(self):
+        """
+        Microsoft .NET Framework SDK directory.
+        """
+        for ver in self.NetFxSdkVersion:
+            loc = os.path.join(self.ri.netfx_sdk, ver)
+            sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder')
+            if sdkdir:
+                break
+        return sdkdir or ''
+
+    @property
+    def FrameworkDir32(self):
+        """
+        Microsoft .NET Framework 32bit directory.
+        """
+        # Default path, used when the registry has no frameworkdir32 value
+        guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework')
+
+        # Try to get path from registry, if fail use default path
+        return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw
+
+    @property
+    def FrameworkDir64(self):
+        """
+        Microsoft .NET Framework 64bit directory.
+        """
+        # Default path, used when the registry has no frameworkdir64 value
+        guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework64')
+
+        # Try to get path from registry, if fail use default path
+        return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw
+
+    @property
+    def FrameworkVersion32(self):
+        """
+        Microsoft .NET Framework 32bit versions.
+        """
+        return self._find_dot_net_versions(32)
+
+    @property
+    def FrameworkVersion64(self):
+        """
+        Microsoft .NET Framework 64bit versions.
+        """
+        return self._find_dot_net_versions(64)
+
+    def _find_dot_net_versions(self, bits):
+        """
+        Find Microsoft .NET Framework versions.
+
+        Parameters
+        ----------
+        bits: int
+            Platform number of bits: 32 or 64.
+        """
+        # Find actual .NET version in registry
+        reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits)
+        dot_net_dir = getattr(self, 'FrameworkDir%d' % bits)
+        ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or ''
+
+        # Set .NET versions for specified MSVC++ version
+        if self.vc_ver >= 12.0:
+            frameworkver = (ver, 'v4.0')
+        elif self.vc_ver >= 10.0:
+            frameworkver = ('v4.0.30319' if ver.lower()[:2] != 'v4' else ver,
+                            'v3.5')
+        elif self.vc_ver == 9.0:
+            frameworkver = ('v3.5', 'v2.0.50727')
+        if self.vc_ver == 8.0:
+            frameworkver = ('v3.0', 'v2.0.50727')
+        return frameworkver
+
+    def _use_last_dir_name(self, path, prefix=''):
+        """
+        Return name of the last dir in path or '' if no dir found.
+
+        Parameters
+        ----------
+        path: str
+            Use dirs in this path
+        prefix: str
+            Use only dirs starting with this prefix
+        """
+        # NOTE(review): os.listdir raises OSError when `path` does not
+        # exist, and its ordering is filesystem-dependent, so "last" is
+        # only the newest version if names come back sorted — confirm.
+        matching_dirs = (
+            dir_name
+            for dir_name in reversed(os.listdir(path))
+            if os.path.isdir(os.path.join(path, dir_name)) and
+            dir_name.startswith(prefix)
+        )
+        return next(matching_dirs, None) or ''
+
+
+class EnvironmentInfo:
+    """
+    Return environment variables for specified Microsoft Visual C++ version
+    and platform : Lib, Include, Path and libpath.
+
+    This function is compatible with Microsoft Visual C++ 9.0 to 14.0.
+
+    Script created by analysing Microsoft environment configuration files like
+    "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ...
+
+    Parameters
+    ----------
+    arch: str
+        Target architecture.
+    vc_ver: float
+        Required Microsoft Visual C++ version. If not set, autodetect the last
+        version.
+    vc_min_ver: float
+        Minimum Microsoft Visual C++ version.
+    """
+
+    # Variables and properties in this class use the original CamelCase
+    # names from Microsoft source files for easier comparison.
+
+    def __init__(self, arch, vc_ver=None, vc_min_ver=0):
+        self.pi = PlatformInfo(arch)
+        self.ri = RegistryInfo(self.pi)
+        self.si = SystemInfo(self.ri, vc_ver)
+
+        # Fail early when the detected compiler is older than required.
+        if self.vc_ver < vc_min_ver:
+            err = 'No suitable Microsoft Visual C++ version found'
+            raise distutils.errors.DistutilsPlatformError(err)
+
+    @property
+    def vc_ver(self):
+        """
+        Microsoft Visual C++ version (delegates to SystemInfo).
+        """
+        return self.si.vc_ver
+
+    @property
+    def VSTools(self):
+        """
+        Microsoft Visual Studio Tools paths, rooted at VSInstallDir.
+        """
+        paths = [r'Common7\IDE', r'Common7\Tools']
+
+        if self.vc_ver >= 14.0:
+            arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
+            paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow']
+            paths += [r'Team Tools\Performance Tools']
+            paths += [r'Team Tools\Performance Tools%s' % arch_subdir]
+
+        return [os.path.join(self.si.VSInstallDir, path) for path in paths]
+
+    @property
+    def VCIncludes(self):
+        """
+        Microsoft Visual C++ & Microsoft Foundation Class Includes
+        """
+        return [os.path.join(self.si.VCInstallDir, 'Include'),
+                os.path.join(self.si.VCInstallDir, r'ATLMFC\Include')]
+
+    @property
+    def VCLibraries(self):
+        """
+        Microsoft Visual C++ & Microsoft Foundation Class Libraries
+        """
+        if self.vc_ver >= 15.0:
+            arch_subdir = self.pi.target_dir(x64=True)
+        else:
+            arch_subdir = self.pi.target_dir(hidex86=True)
+        paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir]
+
+        # Store-app libraries only exist from VC++ 14.0 on
+        if self.vc_ver >= 14.0:
+            paths += [r'Lib\store%s' % arch_subdir]
+
+        return [os.path.join(self.si.VCInstallDir, path) for path in paths]
+
+    @property
+    def VCStoreRefs(self):
+        """
+        Microsoft Visual C++ store references Libraries.
+
+        Empty for VC++ versions before 14.0.
+        """
+        if self.vc_ver < 14.0:
+            return []
+        return [os.path.join(self.si.VCInstallDir, r'Lib\store\references')]
+
+    @property
+    def VCTools(self):
+        """
+        Microsoft Visual C++ Tools
+        """
+        si = self.si
+        tools = [os.path.join(si.VCInstallDir, 'VCPackages')]
+
+        forcex86 = True if self.vc_ver <= 10.0 else False
+        arch_subdir = self.pi.cross_dir(forcex86)
+        if arch_subdir:
+            tools += [os.path.join(si.VCInstallDir, 'Bin%s' % arch_subdir)]
+
+        if self.vc_ver == 14.0:
+            path = 'Bin%s' % self.pi.current_dir(hidex86=True)
+            tools += [os.path.join(si.VCInstallDir, path)]
+
+        elif self.vc_ver >= 15.0:
+            host_dir = (r'bin\HostX86%s' if self.pi.current_is_x86() else
+                        r'bin\HostX64%s')
+            tools += [os.path.join(
+                si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))]
+
+            if self.pi.current_cpu != self.pi.target_cpu:
+                tools += [os.path.join(
+                    si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))]
+
+        else:
+            tools += [os.path.join(si.VCInstallDir, 'Bin')]
+
+        return tools
+
+    @property
+    def OSLibraries(self):
+        """
+        Microsoft Windows SDK Libraries
+        """
+        if self.vc_ver <= 10.0:
+            arch_subdir = self.pi.target_dir(hidex86=True, x64=True)
+            return [os.path.join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)]
+
+        else:
+            arch_subdir = self.pi.target_dir(x64=True)
+            lib = os.path.join(self.si.WindowsSdkDir, 'lib')
+            libver = self._sdk_subdir
+            return [os.path.join(lib, '%sum%s' % (libver , arch_subdir))]
+
+    @property
+    def OSIncludes(self):
+        """
+        Microsoft Windows SDK Include paths.
+        """
+        include = os.path.join(self.si.WindowsSdkDir, 'include')
+
+        if self.vc_ver <= 10.0:
+            return [include, os.path.join(include, 'gl')]
+
+        else:
+            # From VC++ 14.0 headers live under a version subdir
+            # (see _sdk_subdir)
+            if self.vc_ver >= 14.0:
+                sdkver = self._sdk_subdir
+            else:
+                sdkver = ''
+            return [os.path.join(include, '%sshared' % sdkver),
+                    os.path.join(include, '%sum' % sdkver),
+                    os.path.join(include, '%swinrt' % sdkver)]
+
+    @property
+    def OSLibpath(self):
+        """
+        Microsoft Windows SDK Libraries Paths (LIBPATH entries), built up
+        incrementally per compiler version.
+        """
+        ref = os.path.join(self.si.WindowsSdkDir, 'References')
+        libpath = []
+
+        if self.vc_ver <= 9.0:
+            libpath += self.OSLibraries
+
+        if self.vc_ver >= 11.0:
+            libpath += [os.path.join(ref, r'CommonConfiguration\Neutral')]
+
+        # VC++ 14.0 adds WinRT metadata and API-contract references
+        if self.vc_ver >= 14.0:
+            libpath += [
+                ref,
+                os.path.join(self.si.WindowsSdkDir, 'UnionMetadata'),
+                os.path.join(
+                    ref,
+                    'Windows.Foundation.UniversalApiContract',
+                    '1.0.0.0',
+                ),
+                os.path.join(
+                    ref,
+                    'Windows.Foundation.FoundationContract',
+                    '1.0.0.0',
+                ),
+                os.path.join(
+                    ref,
+                    'Windows.Networking.Connectivity.WwanContract',
+                    '1.0.0.0',
+                ),
+                os.path.join(
+                    self.si.WindowsSdkDir,
+                    'ExtensionSDKs',
+                    'Microsoft.VCLibs',
+                    '%0.1f' % self.vc_ver,
+                    'References',
+                    'CommonConfiguration',
+                    'neutral',
+                ),
+            ]
+        return libpath
+
+    @property
+    def SdkTools(self):
+        """
+        Microsoft Windows SDK Tools paths (materialized from _sdk_tools).
+        """
+        return list(self._sdk_tools())
+
+    def _sdk_tools(self):
+        """
+        Microsoft Windows SDK Tools paths generator.
+
+        Yields Bin directories appropriate to the compiler version and
+        current architecture, plus the registered SDK executable path.
+        """
+        if self.vc_ver < 15.0:
+            bin_dir = 'Bin' if self.vc_ver <= 11.0 else r'Bin\x86'
+            yield os.path.join(self.si.WindowsSdkDir, bin_dir)
+
+        if not self.pi.current_is_x86():
+            arch_subdir = self.pi.current_dir(x64=True)
+            path = 'Bin%s' % arch_subdir
+            yield os.path.join(self.si.WindowsSdkDir, path)
+
+        if self.vc_ver == 10.0 or self.vc_ver == 11.0:
+            if self.pi.target_is_x86():
+                arch_subdir = ''
+            else:
+                arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
+            path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir
+            yield os.path.join(self.si.WindowsSdkDir, path)
+
+        elif self.vc_ver >= 15.0:
+            path = os.path.join(self.si.WindowsSdkDir, 'Bin')
+            arch_subdir = self.pi.current_dir(x64=True)
+            sdkver = self.si.WindowsSdkLastVersion
+            yield os.path.join(path, '%s%s' % (sdkver, arch_subdir))
+
+        if self.si.WindowsSDKExecutablePath:
+            yield self.si.WindowsSDKExecutablePath
+
+    @property
+    def _sdk_subdir(self):
+        """
+        Microsoft Windows SDK version subdir ('<ver>\\' or '' if unknown).
+        """
+        ucrtver = self.si.WindowsSdkLastVersion
+        return ('%s\\' % ucrtver) if ucrtver else ''
+
+    @property
+    def SdkSetup(self):
+        """
+        Microsoft Windows SDK Setup path.
+
+        Only populated for VC++ 9.0 and older.
+        """
+        if self.vc_ver > 9.0:
+            return []
+
+        return [os.path.join(self.si.WindowsSdkDir, 'Setup')]
+
+    @property
+    def FxTools(self):
+        """
+        Microsoft .NET Framework Tools paths for the relevant bitnesses.
+        """
+        pi = self.pi
+        si = self.si
+
+        # Decide which bitnesses to include based on compiler version
+        # and current/target CPU.
+        if self.vc_ver <= 10.0:
+            include32 = True
+            include64 = not pi.target_is_x86() and not pi.current_is_x86()
+        else:
+            include32 = pi.target_is_x86() or pi.current_is_x86()
+            include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64'
+
+        tools = []
+        if include32:
+            tools += [os.path.join(si.FrameworkDir32, ver)
+                      for ver in si.FrameworkVersion32]
+        if include64:
+            tools += [os.path.join(si.FrameworkDir64, ver)
+                      for ver in si.FrameworkVersion64]
+        return tools
+
+    @property
+    def NetFxSDKLibraries(self):
+        """
+        Microsoft .Net Framework SDK Libraries.
+
+        Empty when vc_ver < 14.0 or when no FxSdk directory was found.
+        """
+        if self.vc_ver < 14.0 or not self.si.NetFxSdkDir:
+            return []
+
+        arch_subdir = self.pi.target_dir(x64=True)
+        return [os.path.join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)]
+
+    @property
+    def NetFxSDKIncludes(self):
+        """
+        Microsoft .Net Framework SDK Includes.
+
+        Empty under the same conditions as NetFxSDKLibraries.
+        """
+        if self.vc_ver < 14.0 or not self.si.NetFxSdkDir:
+            return []
+
+        return [os.path.join(self.si.NetFxSdkDir, r'include\um')]
+
+    @property
+    def VsTDb(self):
+        """
+        Microsoft Visual Studio Team System Database deploy path.
+        """
+        return [os.path.join(self.si.VSInstallDir, r'VSTSDB\Deploy')]
+
+    @property
+    def MSBuild(self):
+        """
+        Microsoft Build Engine paths.
+
+        Empty before VC++ 12.0; rooted at Program Files (x86) for 12.0-14.0
+        and at the VS install dir from 15.0.
+        """
+        if self.vc_ver < 12.0:
+            return []
+        elif self.vc_ver < 15.0:
+            base_path = self.si.ProgramFilesx86
+            arch_subdir = self.pi.current_dir(hidex86=True)
+        else:
+            base_path = self.si.VSInstallDir
+            arch_subdir = ''
+
+        path = r'MSBuild\%0.1f\bin%s' % (self.vc_ver, arch_subdir)
+        build = [os.path.join(base_path, path)]
+
+        if self.vc_ver >= 15.0:
+            # Add Roslyn C# & Visual Basic Compiler
+            build += [os.path.join(base_path, path, 'Roslyn')]
+
+        return build
+
+    @property
+    def HTMLHelpWorkshop(self):
+        """
+        Microsoft HTML Help Workshop path (VC++ 11.0 and newer only).
+        """
+        if self.vc_ver < 11.0:
+            return []
+
+        return [os.path.join(self.si.ProgramFilesx86, 'HTML Help Workshop')]
+
+    @property
+    def UCRTLibraries(self):
+        """
+        Microsoft Universal C Runtime SDK Libraries (VC++ 14.0+ only).
+        """
+        if self.vc_ver < 14.0:
+            return []
+
+        arch_subdir = self.pi.target_dir(x64=True)
+        lib = os.path.join(self.si.UniversalCRTSdkDir, 'lib')
+        ucrtver = self._ucrt_subdir
+        return [os.path.join(lib, '%sucrt%s' % (ucrtver, arch_subdir))]
+
+    @property
+    def UCRTIncludes(self):
+        """
+        Microsoft Universal C Runtime SDK Include (VC++ 14.0+ only).
+        """
+        if self.vc_ver < 14.0:
+            return []
+
+        include = os.path.join(self.si.UniversalCRTSdkDir, 'include')
+        return [os.path.join(include, '%sucrt' % self._ucrt_subdir)]
+
+    @property
+    def _ucrt_subdir(self):
+        """
+        Microsoft Universal C Runtime SDK version subdir
+        ('<ver>\\' or '' if unknown).
+        """
+        ucrtver = self.si.UniversalCRTSdkLastVersion
+        return ('%s\\' % ucrtver) if ucrtver else ''
+
+    @property
+    def FSharp(self):
+        """
+        Microsoft Visual F#
+        """
+        if self.vc_ver < 11.0 and self.vc_ver > 12.0:
+            return []
+
+        return self.si.FSharpInstallDir
+
+    @property
+    def VCRuntimeRedist(self):
+        """
+        Microsoft Visual C++ runtime redistributable dll path.
+        """
+        arch_subdir = self.pi.target_dir(x64=True)
+        if self.vc_ver < 15:
+            redist_path = self.si.VCInstallDir
+            vcruntime = 'redist%s\\Microsoft.VC%d0.CRT\\vcruntime%d0.dll'
+        else:
+            # VS2017 moved the redist out of the \Tools tree into \Redist
+            redist_path = self.si.VCInstallDir.replace('\\Tools', '\\Redist')
+            vcruntime = 'onecore%s\\Microsoft.VC%d0.CRT\\vcruntime%d0.dll'
+
+        # Visual Studio 2017 is still Visual C++ 14.0
+        dll_ver = 14.0 if self.vc_ver == 15 else self.vc_ver
+
+        vcruntime = vcruntime % (arch_subdir, self.vc_ver, dll_ver)
+        return os.path.join(redist_path, vcruntime)
+
+    def return_env(self, exists=True):
+        """
+        Return environment dict with 'include', 'lib', 'libpath' and 'path'
+        keys, each a pathsep-joined string of unique directories.
+
+        Parameters
+        ----------
+        exists: bool
+            If True, only return existing paths.
+        """
+        env = dict(
+            include=self._build_paths('include',
+                                      [self.VCIncludes,
+                                       self.OSIncludes,
+                                       self.UCRTIncludes,
+                                       self.NetFxSDKIncludes],
+                                      exists),
+            lib=self._build_paths('lib',
+                                  [self.VCLibraries,
+                                   self.OSLibraries,
+                                   self.FxTools,
+                                   self.UCRTLibraries,
+                                   self.NetFxSDKLibraries],
+                                  exists),
+            libpath=self._build_paths('libpath',
+                                      [self.VCLibraries,
+                                       self.FxTools,
+                                       self.VCStoreRefs,
+                                       self.OSLibpath],
+                                      exists),
+            path=self._build_paths('path',
+                                   [self.VCTools,
+                                    self.VSTools,
+                                    self.VsTDb,
+                                    self.SdkTools,
+                                    self.SdkSetup,
+                                    self.FxTools,
+                                    self.MSBuild,
+                                    self.HTMLHelpWorkshop,
+                                    self.FSharp],
+                                   exists),
+        )
+        # Only advertise the runtime redistributable when the DLL actually
+        # exists on disk (VC++ 14.0 and newer).
+        if self.vc_ver >= 14 and os.path.isfile(self.VCRuntimeRedist):
+            env['py_vcruntime_redist'] = self.VCRuntimeRedist
+        return env
+
+    def _build_paths(self, name, spec_path_lists, exists):
+        """
+        Given an environment variable name and specified paths,
+        return a pathsep-separated string of paths containing
+        unique, extant, directories from those paths and from
+        the environment variable. Raise an error if no paths
+        are resolved.
+        """
+        # flatten spec_path_lists
+        spec_paths = itertools.chain.from_iterable(spec_path_lists)
+        # NOTE(review): `safe_env` is a module-level mapping defined
+        # elsewhere in this file (not shown here).
+        env_paths = safe_env.get(name, '').split(os.pathsep)
+        paths = itertools.chain(spec_paths, env_paths)
+        extant_paths = list(filter(os.path.isdir, paths)) if exists else paths
+        if not extant_paths:
+            msg = "%s environment variable is empty" % name.upper()
+            raise distutils.errors.DistutilsPlatformError(msg)
+        unique_paths = self._unique_everseen(extant_paths)
+        return os.pathsep.join(unique_paths)
+
+    # from Python docs (itertools recipes)
+    def _unique_everseen(self, iterable, key=None):
+        """
+        List unique elements, preserving order.
+        Remember all elements ever seen.
+
+        _unique_everseen('AAAABBBCCDAABBB') --> A B C D
+
+        _unique_everseen('ABBCcAD', str.lower) --> A B C D
+        """
+        seen = set()
+        seen_add = seen.add
+        # NOTE(review): `filterfalse` comes from a module-level import
+        # (six.moves) not visible in this chunk.
+        if key is None:
+            for element in filterfalse(seen.__contains__, iterable):
+                seen_add(element)
+                yield element
+        else:
+            for element in iterable:
+                k = key(element)
+                if k not in seen:
+                    seen_add(k)
+                    yield element
diff --git a/setuptools/namespaces.py b/setuptools/namespaces.py
new file mode 100755
index 0000000..dc16106
--- /dev/null
+++ b/setuptools/namespaces.py
@@ -0,0 +1,107 @@
+import os
+from distutils import log
+import itertools
+
+from setuptools.extern.six.moves import map
+
+
+flatten = itertools.chain.from_iterable
+
+
+class Installer:
+    """
+    Mix-in that writes/removes a ``*-nspkg.pth`` file registering namespace
+    packages.
+
+    Expects the host command class to provide ``outputs``, ``dry_run``,
+    ``distribution`` and (via ``_get_target``) ``target``.
+    """
+
+    # Suffix appended to the target filename for the generated .pth file
+    nspkg_ext = '-nspkg.pth'
+
+    def install_namespaces(self):
+        """Write the -nspkg.pth file for all declared namespace packages."""
+        nsp = self._get_all_ns_packages()
+        if not nsp:
+            return
+        filename, ext = os.path.splitext(self._get_target())
+        filename += self.nspkg_ext
+        self.outputs.append(filename)
+        log.info("Installing %s", filename)
+        lines = map(self._gen_nspkg_line, nsp)
+
+        if self.dry_run:
+            # always generate the lines, even in dry run
+            list(lines)
+            return
+
+        with open(filename, 'wt') as f:
+            f.writelines(lines)
+
+    def uninstall_namespaces(self):
+        """Remove a previously-installed -nspkg.pth file, if present."""
+        filename, ext = os.path.splitext(self._get_target())
+        filename += self.nspkg_ext
+        if not os.path.exists(filename):
+            return
+        log.info("Removing %s", filename)
+        os.remove(filename)
+
+    def _get_target(self):
+        # Base path the .pth filename is derived from; overridden by
+        # DevelopInstaller.
+        return self.target
+
+    _nspkg_tmpl = (
+        "import sys, types, os",
+        "has_mfs = sys.version_info > (3, 5)",
+        "p = os.path.join(%(root)s, *%(pth)r)",
+        "importlib = has_mfs and __import__('importlib.util')",
+        "has_mfs and __import__('importlib.machinery')",
+        "m = has_mfs and "
+            "sys.modules.setdefault(%(pkg)r, "
+                "importlib.util.module_from_spec("
+                    "importlib.machinery.PathFinder.find_spec(%(pkg)r, "
+                        "[os.path.dirname(p)])))",
+        "m = m or "
+            "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
+        "mp = (m or []) and m.__dict__.setdefault('__path__',[])",
+        "(p not in mp) and mp.append(p)",
+    )
+    "lines for the namespace installer"
+
+    _nspkg_tmpl_multi = (
+        'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
+    )
+    "additional line(s) when a parent package is indicated"
+
+    def _get_root(self):
+        # Expression evaluated inside the .pth file; resolves the site dir
+        # at import time. Overridden by DevelopInstaller.
+        return "sys._getframe(1).f_locals['sitedir']"
+
+    def _gen_nspkg_line(self, pkg):
+        """Render one semicolon-joined .pth line for namespace *pkg*."""
+        # ensure pkg is not a unicode string under Python 2.7
+        pkg = str(pkg)
+        pth = tuple(pkg.split('.'))
+        root = self._get_root()
+        tmpl_lines = self._nspkg_tmpl
+        parent, sep, child = pkg.rpartition('.')
+        if parent:
+            tmpl_lines += self._nspkg_tmpl_multi
+        # locals() supplies root/pth/pkg/parent/child to the %-template
+        return ';'.join(tmpl_lines) % locals() + '\n'
+
+    def _get_all_ns_packages(self):
+        """Return sorted list of all package namespaces"""
+        pkgs = self.distribution.namespace_packages or []
+        return sorted(flatten(map(self._pkg_names, pkgs)))
+
+    @staticmethod
+    def _pkg_names(pkg):
+        """
+        Given a namespace package, yield the components of that
+        package.
+
+        >>> names = Installer._pkg_names('a.b.c')
+        >>> set(names) == set(['a', 'a.b', 'a.b.c'])
+        True
+        """
+        parts = pkg.split('.')
+        while parts:
+            yield '.'.join(parts)
+            parts.pop()
+
+
+class DevelopInstaller(Installer):
+    """Installer variant for the ``develop`` command: the .pth root is the
+    literal egg path and the target is the egg-link file."""
+
+    def _get_root(self):
+        return repr(str(self.egg_path))
+
+    def _get_target(self):
+        return self.egg_link
diff --git a/setuptools/package_index.py b/setuptools/package_index.py
new file mode 100755
index 0000000..b6407be
--- /dev/null
+++ b/setuptools/package_index.py
@@ -0,0 +1,1119 @@
+"""PyPI and direct package downloading"""
+import sys
+import os
+import re
+import shutil
+import socket
+import base64
+import hashlib
+import itertools
+from functools import wraps
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import urllib, http_client, configparser, map
+
+import setuptools
+from pkg_resources import (
+    CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
+    Environment, find_distributions, safe_name, safe_version,
+    to_filename, Requirement, DEVELOP_DIST, EGG_DIST,
+)
+from setuptools import ssl_support
+from distutils import log
+from distutils.errors import DistutilsError
+from fnmatch import translate
+from setuptools.py27compat import get_all_headers
+from setuptools.py33compat import unescape
+from setuptools.wheel import Wheel
+
+# Matches a '#egg=<name>' URL fragment
+EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$')
+HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I)
+# this is here to fix emacs' cruddy broken syntax highlighting
+PYPI_MD5 = re.compile(
+    '<a href="([^"#]+)">([^<]+)</a>\n\\s+\\(<a (?:title="MD5 hash"\n\\s+)'
+    'href="[^?]+\\?:action=show_md5&amp;digest=([0-9a-f]{32})">md5</a>\\)'
+)
+# Matches a URL scheme prefix, e.g. 'http:' or 'svn+ssh:'
+URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match
+# Recognised source-distribution archive extensions
+EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
+
+__all__ = [
+    'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
+    'interpret_distro_name',
+]
+
+_SOCKET_TIMEOUT = 15
+
+_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}"
+user_agent = _tmpl.format(py_major=sys.version[:3], setuptools=setuptools)
+
+
+def parse_requirement_arg(spec):
+    """Parse *spec* as a Requirement, converting parse failures into a
+    DistutilsError with a user-facing message."""
+    try:
+        return Requirement.parse(spec)
+    except ValueError:
+        raise DistutilsError(
+            "Not a URL, existing file, or requirement spec: %r" % (spec,)
+        )
+
+
+def parse_bdist_wininst(name):
+    """
+    Return (base, py_ver, plat) for a possible bdist_wininst ``.exe`` name,
+    or (None, None, None) when the name matches no known pattern.
+    """
+
+    lower = name.lower()
+    base, py_ver, plat = None, None, None
+
+    if lower.endswith('.exe'):
+        if lower.endswith('.win32.exe'):
+            base = name[:-10]
+            plat = 'win32'
+        # startswith with a negative start anchors the match 16 chars from
+        # the end: '.win32-py' + 'X.Y' + '.exe' == 16 chars
+        elif lower.startswith('.win32-py', -16):
+            py_ver = name[-7:-4]
+            base = name[:-16]
+            plat = 'win32'
+        elif lower.endswith('.win-amd64.exe'):
+            base = name[:-14]
+            plat = 'win-amd64'
+        # same trick, 20 chars from the end for '.win-amd64-pyX.Y.exe'
+        elif lower.startswith('.win-amd64-py', -20):
+            py_ver = name[-7:-4]
+            base = name[:-20]
+            plat = 'win-amd64'
+    return base, py_ver, plat
+
+
+def egg_info_for_url(url):
+    """Return (basename, fragment) extracted from *url*'s path and
+    '#'-fragment."""
+    parts = urllib.parse.urlparse(url)
+    scheme, server, path, parameters, query, fragment = parts
+    base = urllib.parse.unquote(path.split('/')[-1])
+    if server == 'sourceforge.net' and base == 'download':  # XXX Yuck
+        base = urllib.parse.unquote(path.split('/')[-2])
+    if '#' in base:
+        base, fragment = base.split('#', 1)
+    return base, fragment
+
+
+def distros_for_url(url, metadata=None):
+    """Yield egg or source distribution objects that might be found at a URL"""
+    base, fragment = egg_info_for_url(url)
+    for dist in distros_for_location(url, base, metadata):
+        yield dist
+    # An '#egg=name' fragment names the project explicitly; treat it as a
+    # checkout distribution.
+    if fragment:
+        match = EGG_FRAGMENT.match(fragment)
+        if match:
+            for dist in interpret_distro_name(
+                url, match.group(1), metadata, precedence=CHECKOUT_DIST
+            ):
+                yield dist
+
+
+def distros_for_location(location, basename, metadata=None):
+    """Yield egg or source distribution objects based on basename"""
+    if basename.endswith('.egg.zip'):
+        basename = basename[:-4]  # strip the .zip
+    if basename.endswith('.egg') and '-' in basename:
+        # only one, unambiguous interpretation
+        return [Distribution.from_location(location, basename, metadata)]
+    if basename.endswith('.whl') and '-' in basename:
+        wheel = Wheel(basename)
+        if not wheel.is_compatible():
+            return []
+        return [Distribution(
+            location=location,
+            project_name=wheel.project_name,
+            version=wheel.version,
+            # Increase priority over eggs.
+            precedence=EGG_DIST + 1,
+        )]
+    if basename.endswith('.exe'):
+        win_base, py_ver, platform = parse_bdist_wininst(basename)
+        if win_base is not None:
+            return interpret_distro_name(
+                location, win_base, metadata, py_ver, BINARY_DIST, platform
+            )
+    # Try source distro extensions (.zip, .tgz, etc.)
+    #
+    for ext in EXTENSIONS:
+        if basename.endswith(ext):
+            basename = basename[:-len(ext)]
+            return interpret_distro_name(location, basename, metadata)
+    return []  # no extension matched
+
+
+def distros_for_filename(filename, metadata=None):
+    """Yield possible egg or source distribution objects based on a filename"""
+    return distros_for_location(
+        normalize_path(filename), os.path.basename(filename), metadata
+    )
+
+
+def interpret_distro_name(
+        location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
+        platform=None
+):
+    """Generate alternative interpretations of a source distro name
+
+    Note: if `location` is a filesystem filename, you should call
+    ``pkg_resources.normalize_path()`` on it before passing it to this
+    routine!
+    """
+    # Generate alternative interpretations of a source distro name
+    # Because some packages are ambiguous as to name/versions split
+    # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
+    # So, we generate each possible interepretation (e.g. "adns, python-1.1.0"
+    # "adns-python, 1.1.0", and "adns-python-1.1.0, no version").  In practice,
+    # the spurious interpretations should be ignored, because in the event
+    # there's also an "adns" package, the spurious "python-1.1.0" version will
+    # compare lower than any numeric version number, and is therefore unlikely
+    # to match a request for it.  It's still a potential problem, though, and
+    # in the long run PyPI and the distutils should go for "safe" names and
+    # versions in distribution archive names (sdist and bdist).
+
+    parts = basename.split('-')
+    # A 'pyX.Y' token past the first two parts marks a bdist_dumb filename
+    if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]):
+        # it is a bdist_dumb, not an sdist -- bail out
+        return
+
+    # Yield every split point: name = parts[:p], version = parts[p:]
+    for p in range(1, len(parts) + 1):
+        yield Distribution(
+            location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
+            py_version=py_version, precedence=precedence,
+            platform=platform
+        )
+
+
+# From Python 2.7 docs (itertools recipes)
+def unique_everseen(iterable, key=None):
+    "List unique elements, preserving order. Remember all elements ever seen."
+    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
+    # unique_everseen('ABBCcAD', str.lower) --> A B C D
+    seen = set()
+    seen_add = seen.add
+    if key is None:
+        for element in six.moves.filterfalse(seen.__contains__, iterable):
+            seen_add(element)
+            yield element
+    else:
+        for element in iterable:
+            k = key(element)
+            if k not in seen:
+                seen_add(k)
+                yield element
+
+
+def unique_values(func):
+    """
+    Wrap a function returning an iterable such that the resulting iterable
+    only ever yields unique items.
+    """
+
+    @wraps(func)
+    def wrapper(*args, **kwargs):
+        return unique_everseen(func(*args, **kwargs))
+
+    return wrapper
+
+
+# Matches any tag carrying a rel="..." attribute; group 2 is the rel value
+REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
+# this line is here to fix emacs' cruddy broken syntax highlighting
+
+
+@unique_values
+def find_external_links(url, page):
+    """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
+
+    # NOTE(review): `htmldecode` is defined elsewhere in this module
+    # (not shown in this chunk).
+    for match in REL.finditer(page):
+        tag, rel = match.groups()
+        rels = set(map(str.strip, rel.lower().split(',')))
+        if 'homepage' in rels or 'download' in rels:
+            for match in HREF.finditer(tag):
+                yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
+
+    # Fall back to scraping the labelled cells on legacy PyPI pages
+    for tag in ("<th>Home Page", "<th>Download URL"):
+        pos = page.find(tag)
+        if pos != -1:
+            match = HREF.search(page, pos)
+            if match:
+                yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
+
+
+class ContentChecker(object):
+    """
+    A null content checker that defines the interface for checking content
+    """
+
+    def feed(self, block):
+        """
+        Feed a block of data to the hash.
+        """
+        return
+
+    def is_valid(self):
+        """
+        Check the hash. Return False if validation fails.
+        """
+        return True
+
+    def report(self, reporter, template):
+        """
+        Call reporter with information about the checker (hash name)
+        substituted into the template.
+        """
+        return
+
+
+class HashChecker(ContentChecker):
+    """Content checker validating downloads against a '<algo>=<hexdigest>'
+    URL fragment."""
+
+    # Recognises fragments like 'md5=abc123...' or 'sha256=...'
+    pattern = re.compile(
+        r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
+        r'(?P<expected>[a-f0-9]+)'
+    )
+
+    def __init__(self, hash_name, expected):
+        self.hash_name = hash_name
+        self.hash = hashlib.new(hash_name)
+        self.expected = expected
+
+    @classmethod
+    def from_url(cls, url):
+        "Construct a (possibly null) ContentChecker from a URL"
+        fragment = urllib.parse.urlparse(url)[-1]
+        if not fragment:
+            return ContentChecker()
+        match = cls.pattern.search(fragment)
+        if not match:
+            return ContentChecker()
+        return cls(**match.groupdict())
+
+    def feed(self, block):
+        self.hash.update(block)
+
+    def is_valid(self):
+        # Compare the computed digest against the expected fragment value
+        return self.hash.hexdigest() == self.expected
+
+    def report(self, reporter, template):
+        msg = template % self.hash_name
+        return reporter(msg)
+
+
class PackageIndex(Environment):
    """A distribution index that scans web pages for download URLs"""

    def __init__(
            self, index_url="https://pypi.org/simple/", hosts=('*',),
            ca_bundle=None, verify_ssl=True, *args, **kw
    ):
        # `hosts` is a sequence of glob patterns naming hosts we may
        # contact; the default '*' allows every host.
        Environment.__init__(self, *args, **kw)
        # Slice trick: "/"[:False] == "" and "/"[:True] == "/", so this
        # appends a slash only when index_url lacks a trailing one.
        self.index_url = index_url + "/" [:not index_url.endswith('/')]
        self.scanned_urls = {}   # URLs already examined (not necessarily fetched)
        self.fetched_urls = {}   # URLs whose pages were actually retrieved
        self.package_pages = {}  # project key -> {package page URL: True}
        self.allows = re.compile('|'.join(map(translate, hosts))).match
        self.to_scan = []        # find-links URLs deferred until prescan()
        # Use a verifying SSL opener only when requested and usable.
        use_ssl = (
            verify_ssl
            and ssl_support.is_available
            and (ca_bundle or ssl_support.find_ca_bundle())
        )
        if use_ssl:
            self.opener = ssl_support.opener_for(ca_bundle)
        else:
            self.opener = urllib.request.urlopen

    def process_url(self, url, retrieve=False):
        """Evaluate a URL as a possible download, and maybe retrieve it"""
        if url in self.scanned_urls and not retrieve:
            return
        self.scanned_urls[url] = True
        if not URL_SCHEME(url):
            # No scheme: treat as a local filename or directory.
            self.process_filename(url)
            return
        else:
            dists = list(distros_for_url(url))
            if dists:
                if not self.url_ok(url):
                    return
                self.debug("Found link: %s", url)

        if dists or not retrieve or url in self.fetched_urls:
            list(map(self.add, dists))
            return  # don't need the actual page

        if not self.url_ok(url):
            self.fetched_urls[url] = True
            return

        self.info("Reading %s", url)
        self.fetched_urls[url] = True  # prevent multiple fetch attempts
        tmpl = "Download error on %s: %%s -- Some packages may not be found!"
        f = self.open_url(url, tmpl % url)
        if f is None:
            return
        self.fetched_urls[f.url] = True
        if 'html' not in f.headers.get('content-type', '').lower():
            f.close()  # not html, we can't process it
            return

        base = f.url  # handle redirects
        page = f.read()
        if not isinstance(page, str):
            # In Python 3 and got bytes but want str.
            if isinstance(f, urllib.error.HTTPError):
                # Errors have no charset, assume latin1:
                charset = 'latin-1'
            else:
                charset = f.headers.get_param('charset') or 'latin-1'
            page = page.decode(charset, "ignore")
        f.close()
        # Recursively evaluate every hyperlink on the page.
        for match in HREF.finditer(page):
            link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
            self.process_url(link)
        if url.startswith(self.index_url) and getattr(f, 'code', None) != 404:
            page = self.process_index(url, page)

    def process_filename(self, fn, nested=False):
        """Add distributions found at local path `fn` (recursing one level
        into directories)."""
        # process filenames or directories
        if not os.path.exists(fn):
            self.warn("Not found: %s", fn)
            return

        if os.path.isdir(fn) and not nested:
            path = os.path.realpath(fn)
            for item in os.listdir(path):
                self.process_filename(os.path.join(path, item), True)

        dists = distros_for_filename(fn)
        if dists:
            self.debug("Found: %s", fn)
            list(map(self.add, dists))

    def url_ok(self, url, fatal=False):
        """Return True if `url` is a file URL or its host matches the
        allowed-hosts patterns; otherwise warn (or raise when `fatal`)."""
        s = URL_SCHEME(url)
        is_file = s and s.group(1).lower() == 'file'
        if is_file or self.allows(urllib.parse.urlparse(url)[1]):
            return True
        msg = (
            "\nNote: Bypassing %s (disallowed host; see "
            "http://bit.ly/2hrImnY for details).\n")
        if fatal:
            raise DistutilsError(msg % url)
        else:
            self.warn(msg, url)

    def scan_egg_links(self, search_path):
        """Scan every directory on `search_path` for ``*.egg-link`` files."""
        dirs = filter(os.path.isdir, search_path)
        egg_links = (
            (path, entry)
            for path in dirs
            for entry in os.listdir(path)
            if entry.endswith('.egg-link')
        )
        list(itertools.starmap(self.scan_egg_link, egg_links))

    def scan_egg_link(self, path, entry):
        """Register the development distribution referenced by one
        ``.egg-link`` file (two non-empty lines: egg path, setup path)."""
        with open(os.path.join(path, entry)) as raw_lines:
            # filter non-empty lines
            lines = list(filter(None, map(str.strip, raw_lines)))

        if len(lines) != 2:
            # format is not recognized; punt
            return

        egg_path, setup_path = lines

        for dist in find_distributions(os.path.join(path, egg_path)):
            # NOTE(review): joins *both* lines (egg_path and setup_path) into
            # the location; upstream setuptools uses egg_path alone -- verify.
            dist.location = os.path.join(path, *lines)
            dist.precedence = SOURCE_DIST
            self.add(dist)

    def process_index(self, url, page):
        """Process the contents of a PyPI page"""

        def scan(link):
            # Process a URL to see if it's for a package page
            if link.startswith(self.index_url):
                parts = list(map(
                    urllib.parse.unquote, link[len(self.index_url):].split('/')
                ))
                if len(parts) == 2 and '#' not in parts[1]:
                    # it's a package page, sanitize and index it
                    pkg = safe_name(parts[0])
                    ver = safe_version(parts[1])
                    self.package_pages.setdefault(pkg.lower(), {})[link] = True
                    return to_filename(pkg), to_filename(ver)
            return None, None

        # process an index page into the package-page index
        for match in HREF.finditer(page):
            try:
                scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
            except ValueError:
                pass

        pkg, ver = scan(url)  # ensure this page is in the page index
        if pkg:
            # process individual package page
            for new_url in find_external_links(url, page):
                # Process the found URL
                base, frag = egg_info_for_url(new_url)
                if base.endswith('.py') and not frag:
                    if ver:
                        new_url += '#egg=%s-%s' % (pkg, ver)
                    else:
                        self.need_version_info(url)
                self.scan_url(new_url)

            # Rewrite legacy md5 links into fragment form for later checking.
            return PYPI_MD5.sub(
                lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
            )
        else:
            return ""  # no sense double-scanning non-package pages

    def need_version_info(self, url):
        """Trigger a full index scan because a .py link lacks version info."""
        self.scan_all(
            "Page at %s links to .py file(s) without version info; an index "
            "scan is required.", url
        )

    def scan_all(self, msg=None, *args):
        """Scan the top-level index page (warning with `msg` first, if any)."""
        if self.index_url not in self.fetched_urls:
            if msg:
                self.warn(msg, *args)
            self.info(
                "Scanning index of all packages (this may take a while)"
            )
        self.scan_url(self.index_url)

    def find_packages(self, requirement):
        """Scan index pages that might list downloads for `requirement`."""
        self.scan_url(self.index_url + requirement.unsafe_name + '/')

        if not self.package_pages.get(requirement.key):
            # Fall back to safe version of the name
            self.scan_url(self.index_url + requirement.project_name + '/')

        if not self.package_pages.get(requirement.key):
            # We couldn't find the target package, so search the index page too
            self.not_found_in_index(requirement)

        for url in list(self.package_pages.get(requirement.key, ())):
            # scan each page that might be related to the desired package
            self.scan_url(url)

    def obtain(self, requirement, installer=None):
        """Locate a distribution matching `requirement`, scanning the index
        first and deferring to the base Environment as a last resort."""
        self.prescan()
        self.find_packages(requirement)
        for dist in self[requirement.key]:
            if dist in requirement:
                return dist
            self.debug("%s does not match %s", requirement, dist)
        return super(PackageIndex, self).obtain(requirement, installer)

    def check_hash(self, checker, filename, tfp):
        """
        checker is a ContentChecker
        """
        checker.report(
            self.debug,
            "Validating %%s checksum for %s" % filename)
        if not checker.is_valid():
            # Remove the corrupt download before raising.
            tfp.close()
            os.unlink(filename)
            raise DistutilsError(
                "%s validation failed for %s; "
                "possible download problem?"
                % (checker.hash.name, os.path.basename(filename))
            )

    def add_find_links(self, urls):
        """Add `urls` to the list that will be prescanned for searches"""
        for url in urls:
            if (
                self.to_scan is None  # if we have already "gone online"
                or not URL_SCHEME(url)  # or it's a local file/directory
                or url.startswith('file:')
                or list(distros_for_url(url))  # or a direct package link
            ):
                # then go ahead and process it now
                self.scan_url(url)
            else:
                # otherwise, defer retrieval till later
                self.to_scan.append(url)

    def prescan(self):
        """Scan urls scheduled for prescanning (e.g. --find-links)"""
        if self.to_scan:
            list(map(self.scan_url, self.to_scan))
        self.to_scan = None  # from now on, go ahead and process immediately

    def not_found_in_index(self, requirement):
        """Report a missing package page and fall back to a full index scan."""
        if self[requirement.key]:  # we've seen at least one distro
            meth, msg = self.info, "Couldn't retrieve index page for %r"
        else:  # no distros seen for this name, might be misspelled
            meth, msg = (
                self.warn,
                "Couldn't find index page for %r (maybe misspelled?)")
        meth(msg, requirement.unsafe_name)
        self.scan_all()

    def download(self, spec, tmpdir):
        """Locate and/or download `spec` to `tmpdir`, returning a local path

        `spec` may be a ``Requirement`` object, or a string containing a URL,
        an existing local filename, or a project/version requirement spec
        (i.e. the string form of a ``Requirement`` object).  If it is the URL
        of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
        that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
        automatically created alongside the downloaded file.

        If `spec` is a ``Requirement`` object or a string containing a
        project/version requirement spec, this method returns the location of
        a matching distribution (possibly after downloading it to `tmpdir`).
        If `spec` is a locally existing file or directory name, it is simply
        returned unchanged.  If `spec` is a URL, it is downloaded to a subpath
        of `tmpdir`, and the local filename is returned.  Various errors may be
        raised if a problem occurs during downloading.
        """
        if not isinstance(spec, Requirement):
            scheme = URL_SCHEME(spec)
            if scheme:
                # It's a url, download it to tmpdir
                found = self._download_url(scheme.group(1), spec, tmpdir)
                base, fragment = egg_info_for_url(spec)
                if base.endswith('.py'):
                    found = self.gen_setup(found, fragment, tmpdir)
                return found
            elif os.path.exists(spec):
                # Existing file or directory, just return it
                return spec
            else:
                spec = parse_requirement_arg(spec)
        return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)

    def fetch_distribution(
            self, requirement, tmpdir, force_scan=False, source=False,
            develop_ok=False, local_index=None):
        """Obtain a distribution suitable for fulfilling `requirement`

        `requirement` must be a ``pkg_resources.Requirement`` instance.
        If necessary, or if the `force_scan` flag is set, the requirement is
        searched for in the (online) package index as well as the locally
        installed packages.  If a distribution matching `requirement` is found,
        the returned distribution's ``location`` is the value you would have
        gotten from calling the ``download()`` method with the matching
        distribution's URL or filename.  If no matching distribution is found,
        ``None`` is returned.

        If the `source` flag is set, only source distributions and source
        checkout links will be considered.  Unless the `develop_ok` flag is
        set, development and system eggs (i.e., those using the ``.egg-info``
        format) will be ignored.
        """
        # process a Requirement
        self.info("Searching for %s", requirement)
        skipped = {}
        dist = None

        def find(req, env=None):
            if env is None:
                env = self
            # Find a matching distribution; may be called more than once

            for dist in env[req.key]:

                if dist.precedence == DEVELOP_DIST and not develop_ok:
                    if dist not in skipped:
                        self.warn(
                            "Skipping development or system egg: %s", dist,
                        )
                        skipped[dist] = 1
                    continue

                test = (
                    dist in req
                    and (dist.precedence <= SOURCE_DIST or not source)
                )
                if test:
                    loc = self.download(dist.location, tmpdir)
                    dist.download_location = loc
                    if os.path.exists(dist.download_location):
                        return dist

        if force_scan:
            self.prescan()
            self.find_packages(requirement)
            dist = find(requirement)

        if not dist and local_index is not None:
            dist = find(requirement, local_index)

        if dist is None:
            if self.to_scan is not None:
                self.prescan()
            dist = find(requirement)

        if dist is None and not force_scan:
            self.find_packages(requirement)
            dist = find(requirement)

        if dist is None:
            self.warn(
                "No local packages or working download links found for %s%s",
                (source and "a source distribution of " or ""),
                requirement,
            )
        else:
            self.info("Best match: %s", dist)
            return dist.clone(location=dist.download_location)

    def fetch(self, requirement, tmpdir, force_scan=False, source=False):
        """Obtain a file suitable for fulfilling `requirement`

        DEPRECATED; use the ``fetch_distribution()`` method now instead.  For
        backward compatibility, this routine is identical but returns the
        ``location`` of the downloaded distribution instead of a distribution
        object.
        """
        dist = self.fetch_distribution(requirement, tmpdir, force_scan, source)
        if dist is not None:
            return dist.location
        return None

    def gen_setup(self, filename, fragment, tmpdir):
        """Generate a trivial setup.py in `tmpdir` for a bare .py download
        whose ``#egg=name-version`` fragment pins name and version."""
        match = EGG_FRAGMENT.match(fragment)
        dists = match and [
            d for d in
            interpret_distro_name(filename, match.group(1), None) if d.version
        ] or []

        if len(dists) == 1:  # unambiguous ``#egg`` fragment
            basename = os.path.basename(filename)

            # Make sure the file has been downloaded to the temp dir.
            if os.path.dirname(filename) != tmpdir:
                dst = os.path.join(tmpdir, basename)
                from setuptools.command.easy_install import samefile
                if not samefile(filename, dst):
                    shutil.copy2(filename, dst)
                    filename = dst

            with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
                file.write(
                    "from setuptools import setup\n"
                    "setup(name=%r, version=%r, py_modules=[%r])\n"
                    % (
                        dists[0].project_name, dists[0].version,
                        os.path.splitext(basename)[0]
                    )
                )
            return filename

        elif match:
            raise DistutilsError(
                "Can't unambiguously interpret project/version identifier %r; "
                "any dashes in the name or version should be escaped using "
                "underscores. %r" % (fragment, dists)
            )
        else:
            raise DistutilsError(
                "Can't process plain .py files without an '#egg=name-version'"
                " suffix to enable automatic setup script generation."
            )

    # Chunk size (bytes) used when streaming downloads to disk.
    dl_blocksize = 8192

    def _download_to(self, url, filename):
        """Stream `url` to `filename`, feeding a hash checker and the
        reporthook; return the response headers."""
        self.info("Downloading %s", url)
        # Download the file
        fp = None
        try:
            checker = HashChecker.from_url(url)
            fp = self.open_url(url)
            if isinstance(fp, urllib.error.HTTPError):
                raise DistutilsError(
                    "Can't download %s: %s %s" % (url, fp.code, fp.msg)
                )
            headers = fp.info()
            blocknum = 0
            bs = self.dl_blocksize
            size = -1
            if "content-length" in headers:
                # Some servers return multiple Content-Length headers :(
                sizes = get_all_headers(headers, 'Content-Length')
                size = max(map(int, sizes))
                self.reporthook(url, filename, blocknum, bs, size)
            with open(filename, 'wb') as tfp:
                while True:
                    block = fp.read(bs)
                    if block:
                        checker.feed(block)
                        tfp.write(block)
                        blocknum += 1
                        self.reporthook(url, filename, blocknum, bs, size)
                    else:
                        break
                self.check_hash(checker, filename, tfp)
            return headers
        finally:
            if fp:
                fp.close()

    def reporthook(self, url, filename, blocknum, blksize, size):
        """Progress callback invoked per downloaded block; subclasses may
        override to display progress."""
        pass  # no-op

    def open_url(self, url, warning=None):
        """Open `url`, mapping the many possible network failures either to
        a warning (when `warning` is given) or a DistutilsError."""
        if url.startswith('file:'):
            return local_open(url)
        try:
            return open_with_auth(url, self.opener)
        except (ValueError, http_client.InvalidURL) as v:
            msg = ' '.join([str(arg) for arg in v.args])
            if warning:
                self.warn(warning, msg)
            else:
                raise DistutilsError('%s %s' % (url, msg))
        except urllib.error.HTTPError as v:
            # HTTP errors are returned (not raised) so callers can inspect them.
            return v
        except urllib.error.URLError as v:
            if warning:
                self.warn(warning, v.reason)
            else:
                raise DistutilsError("Download error for %s: %s"
                                     % (url, v.reason))
        except http_client.BadStatusLine as v:
            if warning:
                self.warn(warning, v.line)
            else:
                raise DistutilsError(
                    '%s returned a bad status line. The server might be '
                    'down, %s' %
                    (url, v.line)
                )
        except (http_client.HTTPException, socket.error) as v:
            if warning:
                self.warn(warning, v)
            else:
                raise DistutilsError("Download error for %s: %s"
                                     % (url, v))

    def _download_url(self, scheme, url, tmpdir):
        """Dispatch a download by URL scheme (svn/git/hg/file/http...),
        returning the local path of the result."""
        # Determine download filename
        #
        name, fragment = egg_info_for_url(url)
        if name:
            while '..' in name:
                name = name.replace('..', '.').replace('\\', '_')
        else:
            name = "__downloaded__"  # default if URL has no path contents

        if name.endswith('.egg.zip'):
            name = name[:-4]  # strip the extra .zip before download

        filename = os.path.join(tmpdir, name)

        # Download the file
        #
        if scheme == 'svn' or scheme.startswith('svn+'):
            return self._download_svn(url, filename)
        elif scheme == 'git' or scheme.startswith('git+'):
            return self._download_git(url, filename)
        elif scheme.startswith('hg+'):
            return self._download_hg(url, filename)
        elif scheme == 'file':
            return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
        else:
            self.url_ok(url, True)  # raises error if not allowed
            return self._attempt_download(url, filename)

    def scan_url(self, url):
        """Process `url`, retrieving its page if necessary."""
        self.process_url(url, True)

    def _attempt_download(self, url, filename):
        """Download `url`; if the result is an HTML page, treat it as a
        possible VCS index rather than a distribution."""
        headers = self._download_to(url, filename)
        if 'html' in headers.get('content-type', '').lower():
            return self._download_html(url, headers, filename)
        else:
            return filename

    def _download_html(self, url, headers, filename):
        """Handle an HTML response: a Subversion index page triggers an svn
        checkout; anything else is an error."""
        file = open(filename)
        for line in file:
            if line.strip():
                # Check for a subversion index page
                if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
                    # it's a subversion index page:
                    file.close()
                    os.unlink(filename)
                    return self._download_svn(url, filename)
                break  # not an index page
        file.close()
        os.unlink(filename)
        raise DistutilsError("Unexpected HTML page found at " + url)

    def _download_svn(self, url, filename):
        """Check out `url` with the svn command-line client into `filename`,
        extracting any embedded credentials into --username/--password."""
        url = url.split('#', 1)[0]  # remove any fragment for svn's sake
        creds = ''
        if url.lower().startswith('svn:') and '@' in url:
            scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
            if not netloc and path.startswith('//') and '/' in path[2:]:
                netloc, path = path[2:].split('/', 1)
                auth, host = urllib.parse.splituser(netloc)
                if auth:
                    if ':' in auth:
                        user, pw = auth.split(':', 1)
                        creds = " --username=%s --password=%s" % (user, pw)
                    else:
                        creds = " --username=" + auth
                    netloc = host
                    # NOTE(review): `url` (not `path`) occupies the path slot
                    # when rebuilding here -- looks suspicious; confirm
                    # against upstream before changing.
                    parts = scheme, netloc, url, p, q, f
                    url = urllib.parse.urlunparse(parts)
        self.info("Doing subversion checkout from %s to %s", url, filename)
        # NOTE(review): url/creds are interpolated into a shell command
        # unquoted; a hostile URL could inject shell commands. Consider
        # subprocess.run with an argument list.
        os.system("svn checkout%s -q %s %s" % (creds, url, filename))
        return filename

    @staticmethod
    def _vcs_split_rev_from_url(url, pop_prefix=False):
        """Split a VCS URL like ``git+https://host/repo@rev#frag`` into
        (clean URL, revision-or-None), dropping the VCS prefix and fragment."""
        scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)

        # Drop the "git+"/"hg+" prefix, keeping the transport scheme.
        scheme = scheme.split('+', 1)[-1]

        # Some fragment identification fails
        path = path.split('#', 1)[0]

        rev = None
        if '@' in path:
            path, rev = path.rsplit('@', 1)

        # Also, discard fragment
        url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))

        return url, rev

    def _download_git(self, url, filename):
        """Clone `url` with git into `filename`, checking out any ``@rev``."""
        filename = filename.split('#', 1)[0]
        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)

        self.info("Doing git clone from %s to %s", url, filename)
        # NOTE(review): unquoted shell interpolation -- same injection
        # exposure as _download_svn.
        os.system("git clone --quiet %s %s" % (url, filename))

        if rev is not None:
            self.info("Checking out %s", rev)
            os.system("(cd %s && git checkout --quiet %s)" % (
                filename,
                rev,
            ))

        return filename

    def _download_hg(self, url, filename):
        """Clone `url` with Mercurial into `filename`, updating to ``@rev``."""
        filename = filename.split('#', 1)[0]
        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)

        self.info("Doing hg clone from %s to %s", url, filename)
        # NOTE(review): unquoted shell interpolation -- same injection
        # exposure as _download_svn.
        os.system("hg clone --quiet %s %s" % (url, filename))

        if rev is not None:
            self.info("Updating to %s", rev)
            os.system("(cd %s && hg up -C -r %s -q)" % (
                filename,
                rev,
            ))

        return filename

    def debug(self, msg, *args):
        """Log at DEBUG level; split out so subclasses can redirect output."""
        log.debug(msg, *args)

    def info(self, msg, *args):
        """Log at INFO level; split out so subclasses can redirect output."""
        log.info(msg, *args)

    def warn(self, msg, *args):
        """Log at WARN level; split out so subclasses can redirect output."""
        log.warn(msg, *args)
+
+
# This pattern matches a character entity reference (a decimal numeric
# reference, a hexadecimal numeric reference, or a named reference).
# The trailing ';' is optional, matching lenient browser behavior.
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
+
+
def decode_entity(match):
    """
    Decode one matched HTML entity reference to its character.

    ``unescape`` requires the complete entity text (e.g. ``&amp;``), so pass
    group(0); group(1) would be the bare name (``amp``), which ``unescape``
    returns unchanged, leaving the entity undecoded.
    """
    what = match.group(0)
    return unescape(what)
+
+
def htmldecode(text):
    """Decode HTML entities in the given text."""
    # Delegates to entity_sub/decode_entity: every named, decimal, or hex
    # entity reference in `text` is replaced by its character.
    return entity_sub(decode_entity, text)
+
+
def socket_timeout(timeout=15):
    """
    Decorator factory: run the wrapped callable with the global socket
    default timeout set to ``timeout`` seconds, restoring the previous
    value afterwards (even on error).
    """
    def decorate(func):
        def timed_call(*args, **kwargs):
            previous = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)
            try:
                return func(*args, **kwargs)
            finally:
                socket.setdefaulttimeout(previous)

        return timed_call

    return decorate
+
+
+def _encode_auth(auth):
+    """
+    A function compatible with Python 2.3-3.3 that will encode
+    auth from a URL suitable for an HTTP header.
+    >>> str(_encode_auth('username%3Apassword'))
+    'dXNlcm5hbWU6cGFzc3dvcmQ='
+
+    Long auth strings should not cause a newline to be inserted.
+    >>> long_auth = 'username:' + 'password'*10
+    >>> chr(10) in str(_encode_auth(long_auth))
+    False
+    """
+    auth_s = urllib.parse.unquote(auth)
+    # convert to bytes
+    auth_bytes = auth_s.encode()
+    # use the legacy interface for Python 2.3 support
+    encoded_bytes = base64.encodestring(auth_bytes)
+    # convert back to a string
+    encoded = encoded_bytes.decode()
+    # strip the trailing carriage return
+    return encoded.replace('\n', '')
+
+
class Credential(object):
    """
    A username/password pair. Use like a namedtuple.
    """

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __iter__(self):
        # Support tuple-style unpacking: user, pw = credential
        return iter((self.username, self.password))

    def __str__(self):
        return '%(username)s:%(password)s' % vars(self)
+
+
class PyPIConfig(configparser.RawConfigParser):
    """Expose the credentials stored in the user's ``~/.pypirc`` file."""

    def __init__(self):
        """
        Load from ~/.pypirc
        """
        blank = dict.fromkeys(['username', 'password', 'repository'], '')
        configparser.RawConfigParser.__init__(self, blank)

        config_path = os.path.join(os.path.expanduser('~'), '.pypirc')
        if os.path.exists(config_path):
            self.read(config_path)

    @property
    def creds_by_repository(self):
        """Map each configured repository URL to its Credential."""
        having_repo = [
            section for section in self.sections()
            if self.get(section, 'repository').strip()
        ]
        return dict(self._get_repo_cred(section) for section in having_repo)

    def _get_repo_cred(self, section):
        # Build a (repository URL, Credential) pair from one config section.
        repo = self.get(section, 'repository').strip()
        cred = Credential(
            self.get(section, 'username').strip(),
            self.get(section, 'password').strip(),
        )
        return repo, cred

    def find_credential(self, url):
        """
        If the URL indicated appears to be a repository defined in this
        config, return the credential for that repository.
        """
        for repository, cred in self.creds_by_repository.items():
            if url.startswith(repository):
                return cred
+
+
def open_with_auth(url, opener=urllib.request.urlopen):
    """Open a urllib2 request, handling HTTP authentication"""

    scheme, netloc, path, params, query, frag = urllib.parse.urlparse(url)

    # Double scheme does not raise on Mac OS X as revealed by a
    # failing test. We would expect "nonnumeric port". Refs #20.
    if netloc.endswith(':'):
        raise http_client.InvalidURL("nonnumeric port: ''")

    # Only extract inline userinfo ("user:pass@host") for HTTP(S) URLs.
    if scheme in ('http', 'https'):
        auth, host = urllib.parse.splituser(netloc)
    else:
        auth = None

    # No credentials embedded in the URL: fall back to ~/.pypirc.
    if not auth:
        cred = PyPIConfig().find_credential(url)
        if cred:
            auth = str(cred)
            info = cred.username, url
            log.info('Authenticating as %s for %s (from .pypirc)', *info)

    if auth:
        # NOTE(review): `host` is only bound in the http/https branch above;
        # a .pypirc credential hit for another scheme would raise NameError
        # here -- confirm callers only reach this with http(s) URLs.
        auth = "Basic " + _encode_auth(auth)
        parts = scheme, host, path, params, query, frag
        new_url = urllib.parse.urlunparse(parts)
        request = urllib.request.Request(new_url)
        request.add_header("Authorization", auth)
    else:
        request = urllib.request.Request(url)

    request.add_header('User-Agent', user_agent)
    fp = opener(request)

    if auth:
        # Put authentication info back into request URL if same host,
        # so that links found on the page will work
        s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
        if s2 == scheme and h2 == host:
            parts = s2, netloc, path2, param2, query2, frag2
            fp.url = urllib.parse.urlunparse(parts)

    return fp


# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
+
+
def fix_sf_url(url):
    """Return `url` unchanged; retained only for backward compatibility."""
    return url
+
+
def local_open(url):
    """Read a local path, with special support for directories"""
    scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
    filename = urllib.request.url2pathname(path)
    if os.path.isfile(filename):
        return urllib.request.urlopen(url)
    elif path.endswith('/') and os.path.isdir(filename):
        # Directory: serve index.html when present, otherwise synthesize a
        # simple listing page with one link per entry.
        files = []
        for f in os.listdir(filename):
            filepath = os.path.join(filename, f)
            if f == 'index.html':
                with open(filepath, 'r') as fp:
                    body = fp.read()
                break
            elif os.path.isdir(filepath):
                f += '/'
            files.append('<a href="{name}">{name}</a>'.format(name=f))
        else:
            # for/else: no index.html found, so emit the generated listing.
            tmpl = (
                "<html><head><title>{url}</title>"
                "</head><body>{files}</body></html>")
            body = tmpl.format(url=url, files='\n'.join(files))
        status, message = 200, "OK"
    else:
        status, message, body = 404, "Path not found", "Not found"

    # HTTPError doubles as a file-like response object for the caller.
    headers = {'content-type': 'text/html'}
    body_stream = six.StringIO(body)
    return urllib.error.HTTPError(url, status, message, headers, body_stream)
diff --git a/setuptools/pep425tags.py b/setuptools/pep425tags.py
new file mode 100644
index 0000000..3bdd328
--- /dev/null
+++ b/setuptools/pep425tags.py
@@ -0,0 +1,317 @@
+# This file originally from pip:
+# https://github.com/pypa/pip/blob/8f4f15a5a95d7d5b511ceaee9ed261176c181970/src/pip/_internal/pep425tags.py
+"""Generate and work with PEP 425 Compatibility Tags."""
+from __future__ import absolute_import
+
+import distutils.util
+from distutils import log
+import platform
+import re
+import sys
+import sysconfig
+import warnings
+from collections import OrderedDict
+
+from . import glibc
+
# Splits a macOS platform tag like "macosx_10_9_x86_64" into
# (name, major, minor, arch) groups.
_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)')
+
+
def get_config_var(var):
    """Return ``sysconfig.get_config_var(var)``, degrading to ``None``.

    Some interpreters raise ``IOError`` when the sysconfig data files
    are missing (issue #1074); surface that as a RuntimeWarning and
    report the variable as unavailable instead of failing.
    """
    try:
        return sysconfig.get_config_var(var)
    except IOError as err:
        warnings.warn("{}".format(err), RuntimeWarning)
        return None
+
+
def get_abbr_impl():
    """Return abbreviated implementation name."""
    if hasattr(sys, 'pypy_version_info'):
        return 'pp'
    if sys.platform.startswith('java'):
        return 'jy'
    if sys.platform == 'cli':
        return 'ip'
    return 'cp'
+
+
def get_impl_ver():
    """Return implementation version string, e.g. ``'37'``."""
    version = get_config_var("py_version_nodot")
    # PyPy (and a missing config var) fall back to the version tuple.
    if not version or get_abbr_impl() == 'pp':
        version = ''.join(map(str, get_impl_version_info()))
    return version
+
+
def get_impl_version_info():
    """Return sys.version_info-like tuple for use in decrementing the minor
    version."""
    if get_abbr_impl() != 'pp':
        return sys.version_info[0], sys.version_info[1]
    # PyPy versions independently of the language; see pypa/pip#2882.
    return (sys.version_info[0], sys.pypy_version_info.major,
            sys.pypy_version_info.minor)
+
+
def get_impl_tag():
    """Return the tag for this specific implementation, e.g. ``'cp37'``."""
    return "{}{}".format(get_abbr_impl(), get_impl_ver())
+
+
def get_flag(var, fallback, expected=True, warn=True):
    """Use a fallback method for determining SOABI flags if the needed config
    var is unset or unavailable.

    ``fallback`` is a zero-argument callable consulted only when the
    config var cannot be read; otherwise the var is compared against
    ``expected``.
    """
    value = get_config_var(var)
    if value is not None:
        return value == expected
    if warn:
        log.debug("Config variable '%s' is unset, Python ABI tag may "
                  "be incorrect", var)
    return fallback()
+
+
def get_abi_tag():
    """Return the ABI tag based on SOABI (if available) or emulate SOABI
    (CPython 2, PyPy)."""
    soabi = get_config_var('SOABI')
    impl = get_abbr_impl()
    if not soabi and impl in {'cp', 'pp'} and hasattr(sys, 'maxunicode'):
        # No SOABI available: reconstruct the flags suffix by hand.
        d = ''  # debug build flag
        m = ''  # pymalloc flag
        u = ''  # wide-unicode flag (pre-3.3 only)
        if get_flag('Py_DEBUG',
                    lambda: hasattr(sys, 'gettotalrefcount'),
                    warn=(impl == 'cp')):
            d = 'd'
        if get_flag('WITH_PYMALLOC',
                    lambda: impl == 'cp',
                    warn=(impl == 'cp')):
            m = 'm'
        # 'u' applies only before 3.3, when builds could differ in
        # Py_UNICODE width (UCS2 vs UCS4).
        if get_flag('Py_UNICODE_SIZE',
                    lambda: sys.maxunicode == 0x10ffff,
                    expected=4,
                    warn=(impl == 'cp' and
                          sys.version_info < (3, 3))) \
                and sys.version_info < (3, 3):
            u = 'u'
        abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
    elif soabi and soabi.startswith('cpython-'):
        # e.g. 'cpython-37m-x86_64-linux-gnu' -> 'cp37m'
        abi = 'cp' + soabi.split('-')[1]
    elif soabi:
        # Normalize any other SOABI into tag-safe characters.
        abi = soabi.replace('.', '_').replace('-', '_')
    else:
        abi = None
    return abi
+
+
+def _is_running_32bit():
+    return sys.maxsize == 2147483647
+
+
def get_platform():
    """Return our platform name 'win32', 'linux_x86_64'"""
    if sys.platform == 'darwin':
        # distutils.util.get_platform() returns the release based on the value
        # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may
        # be significantly older than the user's current machine.
        release, _, machine = platform.mac_ver()
        split_ver = release.split('.')

        # A 32-bit interpreter on 64-bit hardware must advertise the
        # 32-bit architecture so only compatible wheels are selected.
        if machine == "x86_64" and _is_running_32bit():
            machine = "i386"
        elif machine == "ppc64" and _is_running_32bit():
            machine = "ppc"

        return 'macosx_{}_{}_{}'.format(split_ver[0], split_ver[1], machine)

    # XXX remove distutils dependency
    result = distutils.util.get_platform().replace('.', '_').replace('-', '_')
    if result == "linux_x86_64" and _is_running_32bit():
        # 32 bit Python program (running on a 64 bit Linux): pip should only
        # install and run 32 bit compiled extensions in that case.
        result = "linux_i686"

    return result
+
+
def is_manylinux1_compatible():
    """Return True when this system can run manylinux1 wheels."""
    # Only Linux, and only x86-64 / i686
    if get_platform() not in {"linux_x86_64", "linux_i686"}:
        return False

    # Check for presence of _manylinux module: distro maintainers can
    # install one to explicitly declare (in)compatibility.
    try:
        import _manylinux
        return bool(_manylinux.manylinux1_compatible)
    except (ImportError, AttributeError):
        # Fall through to heuristic check below
        pass

    # Check glibc version. CentOS 5 uses glibc 2.5.
    return glibc.have_compatible_glibc(2, 5)
+
+
+def get_darwin_arches(major, minor, machine):
+    """Return a list of supported arches (including group arches) for
+    the given major, minor and machine architecture of an macOS machine.
+    """
+    arches = []
+
+    def _supports_arch(major, minor, arch):
+        # Looking at the application support for macOS versions in the chart
+        # provided by https://en.wikipedia.org/wiki/OS_X#Versions it appears
+        # our timeline looks roughly like:
+        #
+        # 10.0 - Introduces ppc support.
+        # 10.4 - Introduces ppc64, i386, and x86_64 support, however the ppc64
+        #        and x86_64 support is CLI only, and cannot be used for GUI
+        #        applications.
+        # 10.5 - Extends ppc64 and x86_64 support to cover GUI applications.
+        # 10.6 - Drops support for ppc64
+        # 10.7 - Drops support for ppc
+        #
+        # Given that we do not know if we're installing a CLI or a GUI
+        # application, we must be conservative and assume it might be a GUI
+        # application and behave as if ppc64 and x86_64 support did not occur
+        # until 10.5.
+        #
+        # Note: The above information is taken from the "Application support"
+        #       column in the chart not the "Processor support" since I believe
+        #       that we care about what instruction sets an application can use
+        #       not which processors the OS supports.
+        if arch == 'ppc':
+            return (major, minor) <= (10, 5)
+        if arch == 'ppc64':
+            return (major, minor) == (10, 5)
+        if arch == 'i386':
+            return (major, minor) >= (10, 4)
+        if arch == 'x86_64':
+            return (major, minor) >= (10, 5)
+        if arch in groups:
+            for garch in groups[arch]:
+                if _supports_arch(major, minor, garch):
+                    return True
+        return False
+
+    groups = OrderedDict([
+        ("fat", ("i386", "ppc")),
+        ("intel", ("x86_64", "i386")),
+        ("fat64", ("x86_64", "ppc64")),
+        ("fat32", ("x86_64", "i386", "ppc")),
+    ])
+
+    if _supports_arch(major, minor, machine):
+        arches.append(machine)
+
+    for garch in groups:
+        if machine in groups[garch] and _supports_arch(major, minor, garch):
+            arches.append(garch)
+
+    arches.append('universal')
+
+    return arches
+
+
def get_supported(versions=None, noarch=False, platform=None,
                  impl=None, abi=None):
    """Return a list of supported tags for each version specified in
    `versions`.

    Each tag is an ``(interpreter, abi, platform)`` triple, ordered from
    most to least specific.

    :param versions: a list of string versions, of the form ["33", "32"],
        or None. The first version will be assumed to support our ABI.
    :param noarch: when True, emit only platform-independent tags.
    :param platform: specify the exact platform you want valid
        tags for, or None. If None, use the local system platform.
    :param impl: specify the exact implementation you want valid
        tags for, or None. If None, use the local interpreter impl.
    :param abi: specify the exact abi you want valid
        tags for, or None. If None, use the local interpreter abi.
    """
    supported = []

    # Versions must be given with respect to the preference
    if versions is None:
        versions = []
        version_info = get_impl_version_info()
        major = version_info[:-1]
        # Support all previous minor Python versions.
        for minor in range(version_info[-1], -1, -1):
            versions.append(''.join(map(str, major + (minor,))))

    impl = impl or get_abbr_impl()

    abis = []

    abi = abi or get_abi_tag()
    if abi:
        # The concrete ABI is the most specific, so it goes first.
        abis[0:0] = [abi]

    # Collect stable-ABI suffixes (e.g. 'abi3') from the extension
    # module suffixes reported by the interpreter.
    # NOTE(review): ``imp`` is deprecated since Python 3.4 — verify
    # against the importlib-based replacement when updating.
    abi3s = set()
    import imp
    for suffix in imp.get_suffixes():
        if suffix[0].startswith('.abi'):
            abi3s.add(suffix[0].split('.', 2)[1])

    abis.extend(sorted(list(abi3s)))

    abis.append('none')

    if not noarch:
        arch = platform or get_platform()
        if arch.startswith('macosx'):
            # support macosx-10.6-intel on macosx-10.9-x86_64
            match = _osx_arch_pat.match(arch)
            if match:
                name, major, minor, actual_arch = match.groups()
                tpl = '{}_{}_%i_%s'.format(name, major)
                arches = []
                # Every minor release down to x.0, each with all of its
                # compatible (group) architectures.
                for m in reversed(range(int(minor) + 1)):
                    for a in get_darwin_arches(int(major), m, actual_arch):
                        arches.append(tpl % (m, a))
            else:
                # arch pattern didn't match (?!)
                arches = [arch]
        elif platform is None and is_manylinux1_compatible():
            arches = [arch.replace('linux', 'manylinux1'), arch]
        else:
            arches = [arch]

        # Current version, current API (built specifically for our Python):
        for abi in abis:
            for arch in arches:
                supported.append(('%s%s' % (impl, versions[0]), abi, arch))

        # abi3 modules compatible with older version of Python
        for version in versions[1:]:
            # abi3 was introduced in Python 3.2
            if version in {'31', '30'}:
                break
            for abi in abi3s:   # empty set if not Python 3
                for arch in arches:
                    supported.append(("%s%s" % (impl, version), abi, arch))

        # Has binaries, does not use the Python API:
        for arch in arches:
            supported.append(('py%s' % (versions[0][0]), 'none', arch))

    # No abi / arch, but requires our implementation:
    supported.append(('%s%s' % (impl, versions[0]), 'none', 'any'))
    # Tagged specifically as being cross-version compatible
    # (with just the major version specified)
    supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))

    # No abi / arch, generic Python
    for i, version in enumerate(versions):
        supported.append(('py%s' % (version,), 'none', 'any'))
        if i == 0:
            supported.append(('py%s' % (version[0]), 'none', 'any'))

    return supported
+
+
# Tag for the running interpreter (e.g. 'cp36'); computed once at import.
implementation_tag = get_impl_tag()
diff --git a/setuptools/py27compat.py b/setuptools/py27compat.py
new file mode 100644
index 0000000..2985011
--- /dev/null
+++ b/setuptools/py27compat.py
@@ -0,0 +1,28 @@
+"""
+Compatibility Support for Python 2.7 and earlier
+"""
+
+import platform
+
+from setuptools.extern import six
+
+
def get_all_headers(message, key):
    """
    Given an HTTPMessage, return all headers matching a given key.
    """
    # Python 3 spelling; redefined below for Python 2.
    return message.get_all(key)
+
+
# On Python 2 the HTTPMessage API spells it getheaders(); shadow the
# Python 3 definition above with the legacy equivalent.
if six.PY2:
    def get_all_headers(message, key):
        return message.getheaders(key)
+
+
# True only for the Python 2 on Linux combination affected by
# http://bugs.python.org/issue24672 (shutil.rmtree vs. non-ASCII paths).
linux_py2_ascii = (
    platform.system() == 'Linux' and
    six.PY2
)

# On affected systems coerce paths to str (bytes on py2) before rmtree;
# elsewhere pass the path through unchanged.
rmtree_safe = str if linux_py2_ascii else lambda x: x
"""Workaround for http://bugs.python.org/issue24672"""
diff --git a/setuptools/py31compat.py b/setuptools/py31compat.py
new file mode 100644
index 0000000..4ea9532
--- /dev/null
+++ b/setuptools/py31compat.py
@@ -0,0 +1,41 @@
+__all__ = ['get_config_vars', 'get_path']
+
try:
    # Python 2.7 or >=3.2
    from sysconfig import get_config_vars, get_path
except ImportError:
    # Python <=2.6 / 3.0-3.1: emulate sysconfig.get_path() on top of
    # distutils for the only two names this package asks for.
    from distutils.sysconfig import get_config_vars, get_python_lib

    def get_path(name):
        """Return the install path for 'platlib' or 'purelib' only."""
        if name not in ('platlib', 'purelib'):
            raise ValueError("Name must be purelib or platlib")
        return get_python_lib(name == 'platlib')
+
+
try:
    # Python >=3.2
    from tempfile import TemporaryDirectory
except ImportError:
    import shutil
    import tempfile

    class TemporaryDirectory(object):
        """
        Very simple temporary directory context manager.
        Will try to delete afterward, but will also ignore OS and similar
        errors on deletion.
        """

        def __init__(self):
            self.name = None  # Handle mkdtemp raising an exception
            self.name = tempfile.mkdtemp()

        def __enter__(self):
            return self.name

        def __exit__(self, exctype, excvalue, exctrace):
            try:
                # ignore_errors=True suppresses most failures already;
                # the except clause catches what rmtree itself raises.
                shutil.rmtree(self.name, True)
            except OSError:  # removal errors are not the only possible
                pass
            self.name = None
diff --git a/setuptools/py33compat.py b/setuptools/py33compat.py
new file mode 100644
index 0000000..2a73ebb
--- /dev/null
+++ b/setuptools/py33compat.py
@@ -0,0 +1,54 @@
+import dis
+import array
+import collections
+
+try:
+    import html
+except ImportError:
+    html = None
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import html_parser
+
+
# An (opcode, argument) pair; ``arg`` is None for argument-less opcodes.
OpArg = collections.namedtuple('OpArg', 'opcode arg')


class Bytecode_compat(object):
    """Minimal stand-in for ``dis.Bytecode`` (which appeared in 3.4).

    Decodes the pre-3.6 bytecode format: each operation is one opcode
    byte, optionally followed by a 16-bit little-endian argument, with
    EXTENDED_ARG prefixes accumulating high-order bits.
    """

    def __init__(self, code):
        self.code = code

    def __iter__(self):
        """Yield '(op,arg)' pair for each operation in code object 'code'"""

        # 'B' (unsigned) is essential: with a signed 'b' array, opcodes
        # >= 128 come out negative, fail the ``op >= dis.HAVE_ARGUMENT``
        # test below, and get misread as argument-less operations,
        # desynchronizing the rest of the scan.
        code_bytes = array.array('B', self.code.co_code)
        eof = len(self.code.co_code)

        ptr = 0
        extended_arg = 0

        while ptr < eof:

            op = code_bytes[ptr]

            if op >= dis.HAVE_ARGUMENT:

                # 16-bit little-endian argument plus any accumulated
                # EXTENDED_ARG prefix.
                arg = (code_bytes[ptr + 1] +
                       code_bytes[ptr + 2] * 256 + extended_arg)
                ptr += 3

                if op == dis.EXTENDED_ARG:
                    # The extended value applies to the *next* opcode.
                    long_type = six.integer_types[-1]
                    extended_arg = arg * long_type(65536)
                    continue

            else:
                arg = None
                ptr += 1

            yield OpArg(op, arg)
+
+
# Prefer the stdlib implementations when available (dis.Bytecode is
# 3.4+, html.unescape is 3.4+); otherwise fall back to the shims above.
Bytecode = getattr(dis, 'Bytecode', Bytecode_compat)


unescape = getattr(html, 'unescape', html_parser.HTMLParser().unescape)
diff --git a/setuptools/py36compat.py b/setuptools/py36compat.py
new file mode 100644
index 0000000..f527969
--- /dev/null
+++ b/setuptools/py36compat.py
@@ -0,0 +1,82 @@
+import sys
+from distutils.errors import DistutilsOptionError
+from distutils.util import strtobool
+from distutils.debug import DEBUG
+
+
class Distribution_parse_config_files:
    """
    Mix-in providing forward-compatibility for functionality to be
    included by default on Python 3.7.

    Do not edit the code in this class except to update functionality
    as implemented in distutils.
    """
    def parse_config_files(self, filenames=None):
        from configparser import ConfigParser

        # Ignore install directory options if we have a venv
        # (sys.prefix differs from base_prefix exactly inside one).
        if sys.prefix != sys.base_prefix:
            ignore_options = [
                'install-base', 'install-platbase', 'install-lib',
                'install-platlib', 'install-purelib', 'install-headers',
                'install-scripts', 'install-data', 'prefix', 'exec-prefix',
                'home', 'user', 'root']
        else:
            ignore_options = []

        ignore_options = frozenset(ignore_options)

        if filenames is None:
            filenames = self.find_config_files()

        if DEBUG:
            self.announce("Distribution.parse_config_files():")

        parser = ConfigParser(interpolation=None)
        for filename in filenames:
            if DEBUG:
                self.announce("  reading %s" % filename)
            parser.read(filename)
            for section in parser.sections():
                options = parser.options(section)
                opt_dict = self.get_option_dict(section)

                for opt in options:
                    if opt != '__name__' and opt not in ignore_options:
                        val = parser.get(section,opt)
                        opt = opt.replace('-', '_')
                        opt_dict[opt] = (filename, val)

            # Make the ConfigParser forget everything (so we retain
            # the original filenames that options come from)
            parser.__init__()

        # If there was a "global" section in the config file, use it
        # to set Distribution options.

        if 'global' in self.command_options:
            for (opt, (src, val)) in self.command_options['global'].items():
                alias = self.negative_opt.get(opt)
                try:
                    if alias:
                        setattr(self, alias, not strtobool(val))
                    elif opt in ('verbose', 'dry_run'): # ugh!
                        setattr(self, opt, strtobool(val))
                    else:
                        setattr(self, opt, val)
                except ValueError as msg:
                    raise DistutilsOptionError(msg)
+
+
if sys.version_info < (3,):
    # Python 2 behavior is sufficient: replace the mix-in above with an
    # empty no-op class so it contributes nothing to the MRO.
    class Distribution_parse_config_files:
        pass
+
+
# Permanently-disabled kill switch: flip the condition once distutils
# itself ships the behavior implemented in the mix-in above.
if False:
    # When updated behavior is available upstream,
    # disable override here.
    class Distribution_parse_config_files:
        pass
diff --git a/setuptools/sandbox.py b/setuptools/sandbox.py
new file mode 100755
index 0000000..685f3f7
--- /dev/null
+++ b/setuptools/sandbox.py
@@ -0,0 +1,491 @@
+import os
+import sys
+import tempfile
+import operator
+import functools
+import itertools
+import re
+import contextlib
+import pickle
+import textwrap
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import builtins, map
+
+import pkg_resources.py31compat
+
# _os is the real, unpatched OS module: AbstractSandbox monkey-patches
# the public `os` module, so originals must be captured up front.
if sys.platform.startswith('java'):
    import org.python.modules.posix.PosixModule as _os
else:
    _os = sys.modules[os.name]
# Capture the builtin `file` (Python 2 only; None on Python 3) and
# `open` before the sandbox replaces them.
try:
    _file = file
except NameError:
    _file = None
_open = open
+from distutils.errors import DistutilsError
+from pkg_resources import working_set
+
+
+__all__ = [
+    "AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
+]
+
+
+def _execfile(filename, globals, locals=None):
+    """
+    Python 3 implementation of execfile.
+    """
+    mode = 'rb'
+    with open(filename, mode) as stream:
+        script = stream.read()
+    if locals is None:
+        locals = globals
+    code = compile(script, filename, 'exec')
+    exec(code, globals, locals)
+
+
@contextlib.contextmanager
def save_argv(repl=None):
    """Snapshot ``sys.argv``, optionally replacing it with *repl*, and
    restore the snapshot on exit.  Yields the saved value."""
    original = sys.argv[:]
    if repl is not None:
        sys.argv[:] = repl
    try:
        yield original
    finally:
        sys.argv[:] = original
+
+
@contextlib.contextmanager
def save_path():
    """Snapshot ``sys.path`` and restore it on exit, yielding the
    snapshot."""
    original = sys.path[:]
    try:
        yield original
    finally:
        sys.path[:] = original
+
+
@contextlib.contextmanager
def override_temp(replacement):
    """
    Monkey-patch tempfile.tempdir with replacement, ensuring it exists
    """
    pkg_resources.py31compat.makedirs(replacement, exist_ok=True)

    previous = tempfile.tempdir
    tempfile.tempdir = replacement
    try:
        yield
    finally:
        tempfile.tempdir = previous
+
+
@contextlib.contextmanager
def pushd(target):
    """chdir into *target* for the duration of the context, yielding the
    previous working directory and returning to it on exit."""
    previous = os.getcwd()
    os.chdir(target)
    try:
        yield previous
    finally:
        os.chdir(previous)
+
+
class UnpickleableException(Exception):
    """
    An exception representing another Exception that could not be pickled.
    """

    @staticmethod
    def dump(type, exc):
        """
        Always return a dumped (pickled) type and exc. If exc can't be pickled,
        wrap it in UnpickleableException first.
        """
        try:
            dumped_type = pickle.dumps(type)
            dumped_exc = pickle.dumps(exc)
        except Exception:
            # get UnpickleableException inside the sandbox
            from setuptools.sandbox import UnpickleableException as cls
            return cls.dump(cls, cls(repr(exc)))
        return dumped_type, dumped_exc
+
+
class ExceptionSaver:
    """
    A Context Manager that will save an exception, serialized, and restore it
    later.
    """

    def __enter__(self):
        return self

    def __exit__(self, type, exc, tb):
        # Nothing raised: let the context exit normally.
        if not exc:
            return

        # Serialize immediately; the environment that produced the
        # exception may be torn down before resume() is called.
        self._saved = UnpickleableException.dump(type, exc)
        self._tb = tb

        # Swallow the exception for later re-raising via resume().
        return True

    def resume(self):
        "restore and re-raise any exception"

        if '_saved' not in vars(self):
            return

        type, exc = map(pickle.loads, self._saved)
        six.reraise(type, exc, self._tb)
+
+
@contextlib.contextmanager
def save_modules():
    """
    Context in which imported modules are saved.

    Translates exceptions internal to the context into the equivalent exception
    outside the context.
    """
    saved = sys.modules.copy()
    # Exceptions raised in the body are captured (pickled) so they can be
    # re-raised after sys.modules has been restored below.
    with ExceptionSaver() as saved_exc:
        yield saved

    sys.modules.update(saved)
    # remove any modules imported since
    del_modules = (
        mod_name for mod_name in sys.modules
        if mod_name not in saved
        # exclude any encodings modules. See #285
        and not mod_name.startswith('encodings.')
    )
    _clear_modules(del_modules)

    # Re-raise (if anything was captured) now that state is restored.
    saved_exc.resume()
+
+
+def _clear_modules(module_names):
+    for mod_name in list(module_names):
+        del sys.modules[mod_name]
+
+
@contextlib.contextmanager
def save_pkg_resources_state():
    """Snapshot pkg_resources global state, yield it, and restore it on
    exit."""
    snapshot = pkg_resources.__getstate__()
    try:
        yield snapshot
    finally:
        pkg_resources.__setstate__(snapshot)
+
+
@contextlib.contextmanager
def setup_context(setup_dir):
    """
    Save interpreter-global state (pkg_resources, sys.modules, sys.path,
    sys.argv, tempdir, cwd), hide setuptools' own modules, and chdir into
    *setup_dir* for the duration of the context.
    """
    temp_dir = os.path.join(setup_dir, 'temp')
    with save_pkg_resources_state():
        with save_modules():
            hide_setuptools()
            with save_path():
                with save_argv():
                    with override_temp(temp_dir):
                        with pushd(setup_dir):
                            # ensure setuptools commands are available
                            __import__('setuptools')
                            yield
+
+
+def _needs_hiding(mod_name):
+    """
+    >>> _needs_hiding('setuptools')
+    True
+    >>> _needs_hiding('pkg_resources')
+    True
+    >>> _needs_hiding('setuptools_plugin')
+    False
+    >>> _needs_hiding('setuptools.__init__')
+    True
+    >>> _needs_hiding('distutils')
+    True
+    >>> _needs_hiding('os')
+    False
+    >>> _needs_hiding('Cython')
+    True
+    """
+    pattern = re.compile(r'(setuptools|pkg_resources|distutils|Cython)(\.|$)')
+    return bool(pattern.match(mod_name))
+
+
def hide_setuptools():
    """
    Remove references to setuptools' modules from sys.modules to allow the
    invocation to import the most appropriate setuptools. This technique is
    necessary to avoid issues such as #315 where setuptools upgrading itself
    would fail to find a function declared in the metadata.
    """
    _clear_modules(filter(_needs_hiding, sys.modules))
+
+
def run_setup(setup_script, args):
    """Run a distutils setup script, sandboxed in its directory.

    Global state is saved/restored by setup_context; filesystem writes
    outside the script's directory raise SandboxViolation.  A falsy
    SystemExit from the script is treated as a normal return.
    """
    setup_dir = os.path.abspath(os.path.dirname(setup_script))
    with setup_context(setup_dir):
        try:
            sys.argv[:] = [setup_script] + list(args)
            sys.path.insert(0, setup_dir)
            # reset to include setup dir, w/clean callback list
            working_set.__init__()
            working_set.callbacks.append(lambda dist: dist.activate())

            # __file__ should be a byte string on Python 2 (#712)
            dunder_file = (
                setup_script
                if isinstance(setup_script, str) else
                setup_script.encode(sys.getfilesystemencoding())
            )

            with DirectorySandbox(setup_dir):
                ns = dict(__file__=dunder_file, __name__='__main__')
                _execfile(setup_script, ns)
        except SystemExit as v:
            if v.args and v.args[0]:
                raise
            # Normal exit, just return
+
+
class AbstractSandbox:
    """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts.

    On __enter__, every os function for which this class defines a
    same-named wrapper is patched to route through the wrapper; on
    __exit__ the originals (captured in _os/_file/_open at import time)
    are restored.  The wrapper methods themselves are generated below at
    class-definition time via the locals() loops.
    """

    # Set while the sandbox is installed; wrappers pass calls through
    # unmodified when False.
    _active = False

    def __init__(self):
        # Names shared between this sandbox and the real os module:
        # exactly these get swapped in and out by _copy().
        self._attrs = [
            name for name in dir(_os)
            if not name.startswith('_') and hasattr(self, name)
        ]

    def _copy(self, source):
        # Point os.<name> at source.<name> for every wrapped attribute.
        for name in self._attrs:
            setattr(os, name, getattr(source, name))

    def __enter__(self):
        self._copy(self)
        if _file:
            builtins.file = self._file
        builtins.open = self._open
        self._active = True

    def __exit__(self, exc_type, exc_value, traceback):
        # Deactivate first so restoration itself isn't remapped.
        self._active = False
        if _file:
            builtins.file = _file
        builtins.open = _open
        self._copy(_os)

    def run(self, func):
        """Run 'func' under os sandboxing"""
        with self:
            return func()

    # NOTE: the following _mk_* helpers are called during class body
    # execution (no 'self'); they manufacture wrapper methods that are
    # installed via locals()[name] = ...

    def _mk_dual_path_wrapper(name):
        # Wrapper for two-path operations (rename/link/symlink).
        original = getattr(_os, name)

        def wrap(self, src, dst, *args, **kw):
            if self._active:
                src, dst = self._remap_pair(name, src, dst, *args, **kw)
            return original(src, dst, *args, **kw)

        return wrap

    for name in ["rename", "link", "symlink"]:
        if hasattr(_os, name):
            locals()[name] = _mk_dual_path_wrapper(name)

    def _mk_single_path_wrapper(name, original=None):
        # Wrapper for single-path operations; input path is remapped.
        original = original or getattr(_os, name)

        def wrap(self, path, *args, **kw):
            if self._active:
                path = self._remap_input(name, path, *args, **kw)
            return original(path, *args, **kw)

        return wrap

    if _file:
        _file = _mk_single_path_wrapper('file', _file)
    _open = _mk_single_path_wrapper('open', _open)
    for name in [
        "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
        "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
        "startfile", "mkfifo", "mknod", "pathconf", "access"
    ]:
        if hasattr(_os, name):
            locals()[name] = _mk_single_path_wrapper(name)

    def _mk_single_with_return(name):
        # Wrapper for operations whose return value is also a path
        # (e.g. readlink): remap both input and output.
        original = getattr(_os, name)

        def wrap(self, path, *args, **kw):
            if self._active:
                path = self._remap_input(name, path, *args, **kw)
                return self._remap_output(name, original(path, *args, **kw))
            return original(path, *args, **kw)

        return wrap

    for name in ['readlink', 'tempnam']:
        if hasattr(_os, name):
            locals()[name] = _mk_single_with_return(name)

    def _mk_query(name):
        # Wrapper for no-input queries returning a path (getcwd/tmpnam).
        original = getattr(_os, name)

        def wrap(self, *args, **kw):
            retval = original(*args, **kw)
            if self._active:
                return self._remap_output(name, retval)
            return retval

        return wrap

    for name in ['getcwd', 'tmpnam']:
        if hasattr(_os, name):
            locals()[name] = _mk_query(name)

    def _validate_path(self, path):
        """Called to remap or validate any path, whether input or output"""
        return path

    def _remap_input(self, operation, path, *args, **kw):
        """Called for path inputs"""
        return self._validate_path(path)

    def _remap_output(self, operation, path):
        """Called for path outputs"""
        return self._validate_path(path)

    def _remap_pair(self, operation, src, dst, *args, **kw):
        """Called for path pairs like rename, link, and symlink operations"""
        return (
            self._remap_input(operation + '-from', src, *args, **kw),
            self._remap_input(operation + '-to', dst, *args, **kw)
        )
+
+
# Paths always writable inside a DirectorySandbox; os.devnull when the
# platform provides one.
if hasattr(os, 'devnull'):
    _EXCEPTIONS = [os.devnull,]
else:
    _EXCEPTIONS = []
+
+
class DirectorySandbox(AbstractSandbox):
    """Restrict operations to a single subdirectory - pseudo-chroot"""

    # Operations considered writes: these get validated against the
    # sandbox directory in _remap_input.
    write_ops = dict.fromkeys([
        "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
        "utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
    ])

    _exception_patterns = [
        # Allow lib2to3 to attempt to save a pickled grammar object (#121)
        r'.*lib2to3.*\.pickle$',
    ]
    "exempt writing to paths that match the pattern"

    def __init__(self, sandbox, exceptions=_EXCEPTIONS):
        """*sandbox* is the only directory writes are allowed in;
        *exceptions* is a list of extra path prefixes to exempt."""
        self._sandbox = os.path.normcase(os.path.realpath(sandbox))
        self._prefix = os.path.join(self._sandbox, '')
        self._exceptions = [
            os.path.normcase(os.path.realpath(path))
            for path in exceptions
        ]
        AbstractSandbox.__init__(self)

    def _violation(self, operation, *args, **kw):
        # Imported by name so the exception type resolves correctly even
        # after save_modules/hide_setuptools has reset sys.modules.
        from setuptools.sandbox import SandboxViolation
        raise SandboxViolation(operation, args, kw)

    if _file:

        def _file(self, path, mode='r', *args, **kw):
            # Python 2 `file()` builtin: writes outside the sandbox fail.
            if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
                self._violation("file", path, mode, *args, **kw)
            return _file(path, mode, *args, **kw)

    def _open(self, path, mode='r', *args, **kw):
        # `open()` builtin: only read modes are allowed outside the sandbox.
        if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
            self._violation("open", path, mode, *args, **kw)
        return _open(path, mode, *args, **kw)

    def tmpnam(self):
        # os.tmpnam produces an unsandboxed path; always forbidden.
        self._violation("tmpnam")

    def _ok(self, path):
        """True when *path* resolves inside the sandbox or an exemption."""
        active = self._active
        try:
            # Temporarily deactivate so realpath() below is not remapped
            # back through the sandbox wrappers.
            self._active = False
            realpath = os.path.normcase(os.path.realpath(path))
            return (
                self._exempted(realpath)
                or realpath == self._sandbox
                or realpath.startswith(self._prefix)
            )
        finally:
            self._active = active

    def _exempted(self, filepath):
        start_matches = (
            filepath.startswith(exception)
            for exception in self._exceptions
        )
        pattern_matches = (
            re.match(pattern, filepath)
            for pattern in self._exception_patterns
        )
        candidates = itertools.chain(start_matches, pattern_matches)
        return any(candidates)

    def _remap_input(self, operation, path, *args, **kw):
        """Called for path inputs"""
        if operation in self.write_ops and not self._ok(path):
            self._violation(operation, os.path.realpath(path), *args, **kw)
        return path

    def _remap_pair(self, operation, src, dst, *args, **kw):
        """Called for path pairs like rename, link, and symlink operations"""
        if not self._ok(src) or not self._ok(dst):
            self._violation(operation, src, dst, *args, **kw)
        return (src, dst)

    def open(self, file, flags, mode=0o777, *args, **kw):
        """Called for low-level os.open()"""
        if flags & WRITE_FLAGS and not self._ok(file):
            self._violation("os.open", file, flags, mode, *args, **kw)
        return _os.open(file, flags, mode, *args, **kw)
+
+
# Bitmask of all os.open() flags that imply a write; flags missing on
# this platform (e.g. O_TEMPORARY outside Windows) contribute 0.
WRITE_FLAGS = functools.reduce(
    operator.or_, [getattr(_os, a, 0) for a in
        "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
)
+
+
class SandboxViolation(DistutilsError):
    """A setup script attempted to modify the filesystem outside the sandbox"""

    tmpl = textwrap.dedent("""
        SandboxViolation: {cmd}{args!r} {kwargs}

        The package setup script has attempted to modify files on your system
        that are not within the EasyInstall build area, and has been aborted.

        This package cannot be safely installed by EasyInstall, and may not
        support alternate installation locations even if you run its setup
        script by hand.  Please inform the package's author and the EasyInstall
        maintainers to find out if a fix or workaround is available.
        """).lstrip()

    def __str__(self):
        # self.args carries (operation, call_args, call_kwargs) as passed
        # by DirectorySandbox._violation.
        cmd, args, kwargs = self.args
        return self.tmpl.format(cmd=cmd, args=args, kwargs=kwargs)
diff --git "a/setuptools/script \050dev\051.tmpl" "b/setuptools/script \050dev\051.tmpl"
new file mode 100644
index 0000000..d58b1bb
--- /dev/null
+++ "b/setuptools/script \050dev\051.tmpl"
@@ -0,0 +1,5 @@
+# EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r
+__requires__ = %(spec)r
+__import__('pkg_resources').require(%(spec)r)
+__file__ = %(dev_path)r
+exec(compile(open(__file__).read(), __file__, 'exec'))
diff --git a/setuptools/script.tmpl b/setuptools/script.tmpl
new file mode 100644
index 0000000..ff5efbc
--- /dev/null
+++ b/setuptools/script.tmpl
@@ -0,0 +1,3 @@
+# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r
+__requires__ = %(spec)r
+__import__('pkg_resources').run_script(%(spec)r, %(script_name)r)
diff --git a/setuptools/site-patch.py b/setuptools/site-patch.py
new file mode 100644
index 0000000..0d2d2ff
--- /dev/null
+++ b/setuptools/site-patch.py
@@ -0,0 +1,74 @@
+def __boot():
+    """Locate and load the real 'site' module, then re-apply PYTHONPATH.
+
+    This file is installed *as* 'site' ahead of the stdlib copy; loading
+    the real module repopulates this module's globals, after which the
+    PYTHONPATH entries are re-added as site dirs and sys.path is
+    reordered so user entries precede system ones.
+    """
+    import sys
+    import os
+    PYTHONPATH = os.environ.get('PYTHONPATH')
+    if PYTHONPATH is None or (sys.platform == 'win32' and not PYTHONPATH):
+        PYTHONPATH = []
+    else:
+        PYTHONPATH = PYTHONPATH.split(os.pathsep)
+
+    pic = getattr(sys, 'path_importer_cache', {})
+    # Everything after the PYTHONPATH entries is the "standard" path.
+    stdpath = sys.path[len(PYTHONPATH):]
+    mydir = os.path.dirname(__file__)
+
+    for item in stdpath:
+        if item == mydir or not item:
+            continue  # skip if current dir. on Windows, or my own directory
+        importer = pic.get(item)
+        if importer is not None:
+            loader = importer.find_module('site')
+            if loader is not None:
+                # This should actually reload the current module
+                loader.load_module('site')
+                break
+        else:
+            try:
+                import imp  # Avoid import loop in Python >= 3.3
+                stream, path, descr = imp.find_module('site', [item])
+            except ImportError:
+                continue
+            if stream is None:
+                continue
+            try:
+                # This should actually reload the current module
+                imp.load_module('site', stream, path, descr)
+            finally:
+                stream.close()
+            break
+    else:
+        raise ImportError("Couldn't find the real 'site' module")
+
+    # NOTE(review): makepath/addsitedir are not defined in this file; they
+    # are expected to land in this module's namespace when the real 'site'
+    # module is loaded above (this module IS 'site') -- confirm.
+    known_paths = dict([(makepath(item)[1], 1) for item in sys.path])  # 2.2 comp
+
+    oldpos = getattr(sys, '__egginsert', 0)  # save old insertion position
+    sys.__egginsert = 0  # and reset the current one
+
+    for item in PYTHONPATH:
+        addsitedir(item)
+
+    sys.__egginsert += oldpos  # restore effective old position
+
+    d, nd = makepath(stdpath[0])
+    insert_at = None
+    new_path = []
+
+    for item in sys.path:
+        p, np = makepath(item)
+
+        if np == nd and insert_at is None:
+            # We've hit the first 'system' path entry, so added entries go here
+            insert_at = len(new_path)
+
+        if np in known_paths or insert_at is None:
+            new_path.append(item)
+        else:
+            # new path after the insert point, back-insert it
+            new_path.insert(insert_at, item)
+            insert_at += 1
+
+    sys.path[:] = new_path
+
+
+# Only bootstrap when this file was actually imported in place of the
+# stdlib 'site' module.
+if __name__ == 'site':
+    __boot()
+    del __boot
diff --git a/setuptools/ssl_support.py b/setuptools/ssl_support.py
new file mode 100644
index 0000000..6362f1f
--- /dev/null
+++ b/setuptools/ssl_support.py
@@ -0,0 +1,260 @@
+import os
+import socket
+import atexit
+import re
+import functools
+
+from setuptools.extern.six.moves import urllib, http_client, map, filter
+
+from pkg_resources import ResolutionError, ExtractionError
+
+try:
+    import ssl
+except ImportError:
+    ssl = None
+
+__all__ = [
+    'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
+    'opener_for'
+]
+
+# Well-known CA bundle locations across Linux/BSD/macOS distributions,
+# probed in order by find_ca_bundle().
+cert_paths = """
+/etc/pki/tls/certs/ca-bundle.crt
+/etc/ssl/certs/ca-certificates.crt
+/usr/share/ssl/certs/ca-bundle.crt
+/usr/local/share/certs/ca-root.crt
+/etc/ssl/cert.pem
+/System/Library/OpenSSL/certs/cert.pem
+/usr/local/share/certs/ca-root-nss.crt
+/etc/ssl/ca-bundle.pem
+""".strip().split()
+
+# These attributes are absent when Python was built without SSL support;
+# fall back to plain `object` so the class definitions below still work.
+try:
+    HTTPSHandler = urllib.request.HTTPSHandler
+    HTTPSConnection = http_client.HTTPSConnection
+except AttributeError:
+    HTTPSHandler = HTTPSConnection = object
+
+# True only when both the ssl module and the HTTPS machinery exist.
+is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection)
+
+
+try:
+    from ssl import CertificateError, match_hostname
+except ImportError:
+    try:
+        from backports.ssl_match_hostname import CertificateError
+        from backports.ssl_match_hostname import match_hostname
+    except ImportError:
+        CertificateError = None
+        match_hostname = None
+
+if not CertificateError:
+
+    class CertificateError(ValueError):
+        pass
+
+
+if not match_hostname:
+
+    def _dnsname_match(dn, hostname, max_wildcards=1):
+        """Matching according to RFC 6125, section 6.4.3
+
+        http://tools.ietf.org/html/rfc6125#section-6.4.3
+
+        Returns a truthy value when the certificate DNS name *dn* matches
+        *hostname*; raises CertificateError if *dn* abuses wildcards.
+        """
+        pats = []
+        if not dn:
+            return False
+
+        # Ported from python3-syntax:
+        # leftmost, *remainder = dn.split(r'.')
+        parts = dn.split(r'.')
+        leftmost = parts[0]
+        remainder = parts[1:]
+
+        wildcards = leftmost.count('*')
+        if wildcards > max_wildcards:
+            # Issue #17980: avoid denials of service by refusing more
+            # than one wildcard per fragment.  A survey of established
+            # policy among SSL implementations showed it to be a
+            # reasonable choice.
+            raise CertificateError(
+                "too many wildcards in certificate DNS name: " + repr(dn))
+
+        # speed up common case w/o wildcards
+        if not wildcards:
+            return dn.lower() == hostname.lower()
+
+        # RFC 6125, section 6.4.3, subitem 1.
+        # The client SHOULD NOT attempt to match a presented identifier in which
+        # the wildcard character comprises a label other than the left-most label.
+        if leftmost == '*':
+            # When '*' is a fragment by itself, it matches a non-empty dotless
+            # fragment.
+            pats.append('[^.]+')
+        elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+            # RFC 6125, section 6.4.3, subitem 3.
+            # The client SHOULD NOT attempt to match a presented identifier
+            # where the wildcard character is embedded within an A-label or
+            # U-label of an internationalized domain name.
+            pats.append(re.escape(leftmost))
+        else:
+            # Otherwise, '*' matches any dotless string, e.g. www*
+            pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
+
+        # add the remaining fragments, ignore any wildcards
+        for frag in remainder:
+            pats.append(re.escape(frag))
+
+        pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+        return pat.match(hostname)
+
+    def match_hostname(cert, hostname):
+        """Verify that *cert* (in decoded format as returned by
+        SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
+        rules are followed, but IP addresses are not accepted for *hostname*.
+
+        CertificateError is raised on failure. On success, the function
+        returns nothing.
+        """
+        if not cert:
+            raise ValueError("empty or no certificate")
+        dnsnames = []
+        san = cert.get('subjectAltName', ())
+        for key, value in san:
+            if key == 'DNS':
+                if _dnsname_match(value, hostname):
+                    return
+                dnsnames.append(value)
+        if not dnsnames:
+            # The subject is only checked when there is no dNSName entry
+            # in subjectAltName
+            for sub in cert.get('subject', ()):
+                for key, value in sub:
+                    # XXX according to RFC 2818, the most specific Common Name
+                    # must be used.
+                    if key == 'commonName':
+                        if _dnsname_match(value, hostname):
+                            return
+                        dnsnames.append(value)
+        # All candidates collected above failed to match; report them.
+        if len(dnsnames) > 1:
+            raise CertificateError("hostname %r "
+                "doesn't match either of %s"
+                % (hostname, ', '.join(map(repr, dnsnames))))
+        elif len(dnsnames) == 1:
+            raise CertificateError("hostname %r "
+                "doesn't match %r"
+                % (hostname, dnsnames[0]))
+        else:
+            raise CertificateError("no appropriate commonName or "
+                "subjectAltName fields were found")
+
+
+class VerifyingHTTPSHandler(HTTPSHandler):
+    """Simple verifying handler: no auth, subclasses, timeouts, etc."""
+
+    def __init__(self, ca_bundle):
+        # Path to the CA bundle used to verify every connection.
+        self.ca_bundle = ca_bundle
+        HTTPSHandler.__init__(self)
+
+    def https_open(self, req):
+        # Hand do_open a factory that builds verifying connections bound
+        # to our CA bundle (do_open supplies host and keyword args).
+        return self.do_open(
+            lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req
+        )
+
+
+class VerifyingHTTPSConn(HTTPSConnection):
+    """Simple verifying connection: no auth, subclasses, timeouts, etc."""
+
+    def __init__(self, host, ca_bundle, **kw):
+        HTTPSConnection.__init__(self, host, **kw)
+        # CA bundle path used for certificate verification in connect().
+        self.ca_bundle = ca_bundle
+
+    def connect(self):
+        """Open the TCP connection, wrap it in TLS and verify the peer."""
+        sock = socket.create_connection(
+            (self.host, self.port), getattr(self, 'source_address', None)
+        )
+
+        # Handle the socket if a (proxy) tunnel is present
+        if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
+            self.sock = sock
+            self._tunnel()
+            # http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7
+            # change self.host to mean the proxy server host when tunneling is
+            # being used. Adapt, since we are interested in the destination
+            # host for the match_hostname() comparison.
+            actual_host = self._tunnel_host
+        else:
+            actual_host = self.host
+
+        if hasattr(ssl, 'create_default_context'):
+            # Modern path: the context verifies the certificate chain and
+            # the hostname itself (server_hostname).
+            ctx = ssl.create_default_context(cafile=self.ca_bundle)
+            self.sock = ctx.wrap_socket(sock, server_hostname=actual_host)
+        else:
+            # This is for python < 2.7.9 and < 3.4?
+            # wrap_socket here checks the chain only, so the hostname is
+            # verified manually below.
+            self.sock = ssl.wrap_socket(
+                sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
+            )
+        try:
+            match_hostname(self.sock.getpeercert(), actual_host)
+        except CertificateError:
+            # Tear the socket down before propagating the failure.
+            self.sock.shutdown(socket.SHUT_RDWR)
+            self.sock.close()
+            raise
+
+
+def opener_for(ca_bundle=None):
+    """Get a urlopen() replacement that uses ca_bundle for verification.
+
+    When *ca_bundle* is None, fall back to the first bundle located by
+    find_ca_bundle().
+    """
+    return urllib.request.build_opener(
+        VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
+    ).open
+
+
+# from jaraco.functools
+# from jaraco.functools
+def once(func):
+    """Decorator: call *func* at most once and cache its return value.
+
+    The cached value is stored as an attribute on the function object, so
+    later calls (with any arguments) return the first call's result.
+    """
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        if not hasattr(func, 'always_returns'):
+            func.always_returns = func(*args, **kwargs)
+        return func.always_returns
+    return wrapper
+
+
+@once
+def get_win_certfile():
+    """Return the path of a CA bundle built from the Windows cert store.
+
+    Returns None when the optional `wincertstore` package is missing.
+    The result is computed once (see @once) and reused.
+    """
+    try:
+        import wincertstore
+    except ImportError:
+        return None
+
+    class CertFile(wincertstore.CertFile):
+        def __init__(self):
+            super(CertFile, self).__init__()
+            # Make sure the temporary cert file is removed on exit.
+            atexit.register(self.close)
+
+        def close(self):
+            # Tolerate double-close / already-removed files.
+            try:
+                super(CertFile, self).close()
+            except OSError:
+                pass
+
+    _wincerts = CertFile()
+    _wincerts.addstore('CA')
+    _wincerts.addstore('ROOT')
+    return _wincerts.name
+
+
+def find_ca_bundle():
+    """Return an existing CA bundle path, or None.
+
+    Tries the Windows cert store first, then the well-known cert_paths
+    locations, then the optional `certifi` package.
+    """
+    # six.moves filter is lazy, so paths are only stat'd until one hits.
+    extant_cert_paths = filter(os.path.isfile, cert_paths)
+    return (
+        get_win_certfile()
+        or next(extant_cert_paths, None)
+        or _certifi_where()
+    )
+
+
+def _certifi_where():
+    """Return certifi's bundle path, or None if certifi is unavailable.
+
+    ResolutionError/ExtractionError cover pkg_resources failures when
+    certifi is provided as an egg that cannot be resolved or extracted.
+    """
+    try:
+        return __import__('certifi').where()
+    except (ImportError, ResolutionError, ExtractionError):
+        pass
diff --git a/setuptools/tests/__init__.py b/setuptools/tests/__init__.py
new file mode 100644
index 0000000..54dd7d2
--- /dev/null
+++ b/setuptools/tests/__init__.py
@@ -0,0 +1,6 @@
+import locale
+
+import pytest
+
+# 'ANSI_X3.4-1968' is glibc's name for plain ASCII (e.g. LANG=C).
+is_ascii = locale.getpreferredencoding() == 'ANSI_X3.4-1968'
+# Marker for tests that require a non-ASCII capable locale.
+fail_on_ascii = pytest.mark.xfail(is_ascii, reason="Test fails in this locale")
diff --git a/setuptools/tests/contexts.py b/setuptools/tests/contexts.py
new file mode 100644
index 0000000..535ae10
--- /dev/null
+++ b/setuptools/tests/contexts.py
@@ -0,0 +1,98 @@
+import tempfile
+import os
+import shutil
+import sys
+import contextlib
+import site
+
+from setuptools.extern import six
+import pkg_resources
+
+
+@contextlib.contextmanager
+def tempdir(cd=lambda dir: None, **kwargs):
+    """Yield a fresh temporary directory, removing it on exit.
+
+    *cd* is invoked with the temp dir on entry and with the original
+    working directory on exit (pass os.chdir to work inside the dir).
+    """
+    temp_dir = tempfile.mkdtemp(**kwargs)
+    orig_dir = os.getcwd()
+    try:
+        cd(temp_dir)
+        yield temp_dir
+    finally:
+        cd(orig_dir)
+        shutil.rmtree(temp_dir)
+
+
+@contextlib.contextmanager
+def environment(**replacements):
+    """
+    In a context, patch the environment with replacements. Pass None values
+    to clear the values.
+
+    Yields the dict of original values that were overwritten; everything
+    is restored on exit.
+    """
+    saved = dict(
+        (key, os.environ[key])
+        for key in replacements
+        if key in os.environ
+    )
+
+    # remove values that are null
+    remove = (key for (key, value) in replacements.items() if value is None)
+    # materialize first: we mutate `replacements` while consuming the genexp
+    for key in list(remove):
+        os.environ.pop(key, None)
+        replacements.pop(key)
+
+    os.environ.update(replacements)
+
+    try:
+        yield saved
+    finally:
+        # drop everything we set, then put back the originals
+        for key in replacements:
+            os.environ.pop(key, None)
+        os.environ.update(saved)
+
+
+@contextlib.contextmanager
+def quiet():
+    """
+    Redirect stdout/stderr to StringIO objects to prevent console output from
+    distutils commands.
+
+    Yields the two StringIO buffers, rewound to the start on exit so the
+    caller can read the captured output.
+    """
+
+    old_stdout = sys.stdout
+    old_stderr = sys.stderr
+    new_stdout = sys.stdout = six.StringIO()
+    new_stderr = sys.stderr = six.StringIO()
+    try:
+        yield new_stdout, new_stderr
+    finally:
+        new_stdout.seek(0)
+        new_stderr.seek(0)
+        sys.stdout = old_stdout
+        sys.stderr = old_stderr
+
+
+@contextlib.contextmanager
+def save_user_site_setting():
+    """Yield site.ENABLE_USER_SITE and restore its value on exit."""
+    saved = site.ENABLE_USER_SITE
+    try:
+        yield saved
+    finally:
+        site.ENABLE_USER_SITE = saved
+
+
+@contextlib.contextmanager
+def save_pkg_resources_state():
+    """Snapshot pkg_resources global state and sys.path; restore on exit."""
+    pr_state = pkg_resources.__getstate__()
+    # also save sys.path
+    sys_path = sys.path[:]
+    try:
+        yield pr_state, sys_path
+    finally:
+        sys.path[:] = sys_path
+        pkg_resources.__setstate__(pr_state)
+
+
+@contextlib.contextmanager
+def suppress_exceptions(*excs):
+    """Swallow the given exception types (like contextlib.suppress)."""
+    try:
+        yield
+    except excs:
+        pass
diff --git a/setuptools/tests/environment.py b/setuptools/tests/environment.py
new file mode 100644
index 0000000..c67898c
--- /dev/null
+++ b/setuptools/tests/environment.py
@@ -0,0 +1,60 @@
+import os
+import sys
+import unicodedata
+
+from subprocess import Popen as _Popen, PIPE as _PIPE
+
+
+def _which_dirs(cmd):
+    """Return the set of PATH directories containing an executable *cmd*."""
+    result = set()
+    for path in os.environ.get('PATH', '').split(os.pathsep):
+        filename = os.path.join(path, cmd)
+        if os.access(filename, os.X_OK):
+            result.add(path)
+    return result
+
+
+def run_setup_py(cmd, pypath=None, path=None,
+                 data_stream=0, env=None):
+    """
+    Execution command for tests, separate from those used by the
+    code directly to prevent accidental behavior issues.
+
+    Runs ``python setup.py <cmd...>`` in a subprocess and returns
+    ``(returncode, output)`` where output is the captured stream selected
+    by *data_stream* (0 = stdout, 1 = stderr).
+    """
+    if env is None:
+        env = dict()
+        for envname in os.environ:
+            env[envname] = os.environ[envname]
+
+    # override the python path if needed
+    if pypath is not None:
+        env["PYTHONPATH"] = pypath
+
+    # override the execution path if needed
+    if path is not None:
+        env["PATH"] = path
+    if not env.get("PATH", ""):
+        # Build a minimal PATH from the dirs providing tar and gzip.
+        env["PATH"] = _which_dirs("tar").union(_which_dirs("gzip"))
+        env["PATH"] = os.pathsep.join(env["PATH"])
+
+    cmd = [sys.executable, "setup.py"] + list(cmd)
+
+    # http://bugs.python.org/issue8557
+    shell = sys.platform == 'win32'
+
+    try:
+        proc = _Popen(
+            cmd, stdout=_PIPE, stderr=_PIPE, shell=shell, env=env,
+        )
+
+        data = proc.communicate()[data_stream]
+    except OSError:
+        # Process could not be started at all.
+        return 1, ''
+
+    # decode the console string if needed
+    if hasattr(data, "decode"):
+        # use the default encoding
+        data = data.decode()
+        data = unicodedata.normalize('NFC', data)
+
+    # communicate calls wait()
+    return proc.returncode, data
diff --git a/setuptools/tests/files.py b/setuptools/tests/files.py
new file mode 100644
index 0000000..f5f0e6b
--- /dev/null
+++ b/setuptools/tests/files.py
@@ -0,0 +1,39 @@
+import os
+
+
+from setuptools.extern.six import binary_type
+import pkg_resources.py31compat
+
+
+def build_files(file_defs, prefix=""):
+    """
+    Build a set of files/directories, as described by the file_defs dictionary.
+
+    Each key/value pair in the dictionary is interpreted as a filename/contents
+    pair. If the contents value is a dictionary, a directory is created, and the
+    dictionary interpreted as the files within it, recursively.
+
+    Bytes contents are written in binary mode; anything else is written as
+    text.
+
+    For example:
+
+    {"README.txt": "A README file",
+     "foo": {
+        "__init__.py": "",
+        "bar": {
+            "__init__.py": "",
+        },
+        "baz.py": "# Some code",
+     }
+    }
+    """
+    for name, contents in file_defs.items():
+        full_name = os.path.join(prefix, name)
+        if isinstance(contents, dict):
+            pkg_resources.py31compat.makedirs(full_name, exist_ok=True)
+            build_files(contents, prefix=full_name)
+        else:
+            if isinstance(contents, binary_type):
+                with open(full_name, 'wb') as f:
+                    f.write(contents)
+            else:
+                with open(full_name, 'w') as f:
+                    f.write(contents)
diff --git a/setuptools/tests/fixtures.py b/setuptools/tests/fixtures.py
new file mode 100644
index 0000000..5204c8d
--- /dev/null
+++ b/setuptools/tests/fixtures.py
@@ -0,0 +1,23 @@
+import pytest
+
+from . import contexts
+
+
+# NOTE(review): pytest.yield_fixture is the legacy spelling of
+# pytest.fixture; kept as-is for compatibility with older pytest.
+@pytest.yield_fixture
+def user_override(monkeypatch):
+    """
+    Override site.USER_BASE and site.USER_SITE with temporary directories in
+    a context.
+    """
+    with contexts.tempdir() as user_base:
+        monkeypatch.setattr('site.USER_BASE', user_base)
+        with contexts.tempdir() as user_site:
+            monkeypatch.setattr('site.USER_SITE', user_site)
+            with contexts.save_user_site_setting():
+                yield
+
+
+@pytest.yield_fixture
+def tmpdir_cwd(tmpdir):
+    """Run the test with *tmpdir* as the cwd; yields the original cwd."""
+    with tmpdir.as_cwd() as orig:
+        yield orig
diff --git a/setuptools/tests/indexes/test_links_priority/external.html b/setuptools/tests/indexes/test_links_priority/external.html
new file mode 100644
index 0000000..92e4702
--- /dev/null
+++ b/setuptools/tests/indexes/test_links_priority/external.html
@@ -0,0 +1,3 @@
+<html><body>
+<a href="/foobar-0.1.tar.gz#md5=1__bad_md5___">bad old link</a>
+</body></html>
diff --git a/setuptools/tests/indexes/test_links_priority/simple/foobar/index.html b/setuptools/tests/indexes/test_links_priority/simple/foobar/index.html
new file mode 100644
index 0000000..fefb028
--- /dev/null
+++ b/setuptools/tests/indexes/test_links_priority/simple/foobar/index.html
@@ -0,0 +1,4 @@
+<html><body>
+<a href="/foobar-0.1.tar.gz#md5=0_correct_md5">foobar-0.1.tar.gz</a><br/>
+<a href="../../external.html" rel="homepage">external homepage</a><br/>
+</body></html>
diff --git a/setuptools/tests/mod_with_constant.py b/setuptools/tests/mod_with_constant.py
new file mode 100644
index 0000000..ef755dd
--- /dev/null
+++ b/setuptools/tests/mod_with_constant.py
@@ -0,0 +1 @@
+value = 'three, sir!'
diff --git a/setuptools/tests/namespaces.py b/setuptools/tests/namespaces.py
new file mode 100644
index 0000000..ef5ecda
--- /dev/null
+++ b/setuptools/tests/namespaces.py
@@ -0,0 +1,42 @@
+from __future__ import absolute_import, unicode_literals
+
+import textwrap
+
+
+def build_namespace_package(tmpdir, name):
+    """Create a minimal namespace-package project under *tmpdir*.
+
+    *name* is a dotted name like 'myns.pkg'; the part before the first dot
+    becomes the declared namespace package.  Returns the project dir.
+    """
+    src_dir = tmpdir / name
+    src_dir.mkdir()
+    setup_py = src_dir / 'setup.py'
+    namespace, sep, rest = name.partition('.')
+    script = textwrap.dedent("""
+        import setuptools
+        setuptools.setup(
+            name={name!r},
+            version="1.0",
+            namespace_packages=[{namespace!r}],
+            packages=[{namespace!r}],
+        )
+        """).format(**locals())
+    setup_py.write_text(script, encoding='utf-8')
+    ns_pkg_dir = src_dir / namespace
+    ns_pkg_dir.mkdir()
+    pkg_init = ns_pkg_dir / '__init__.py'
+    tmpl = '__import__("pkg_resources").declare_namespace({namespace!r})'
+    decl = tmpl.format(**locals())
+    pkg_init.write_text(decl, encoding='utf-8')
+    # One importable module inside the namespace so tests can verify it.
+    pkg_mod = ns_pkg_dir / (rest + '.py')
+    some_functionality = 'name = {rest!r}'.format(**locals())
+    pkg_mod.write_text(some_functionality, encoding='utf-8')
+    return src_dir
+
+
+def make_site_dir(target):
+    """
+    Add a sitecustomize.py module in target to cause
+    target to be added to site dirs such that .pth files
+    are processed there.
+    """
+    sc = target / 'sitecustomize.py'
+    target_str = str(target)
+    tmpl = '__import__("site").addsitedir({target_str!r})'
+    sc.write_text(tmpl.format(**locals()), encoding='utf-8')
diff --git a/setuptools/tests/script-with-bom.py b/setuptools/tests/script-with-bom.py
new file mode 100644
index 0000000..22dee0d
--- /dev/null
+++ b/setuptools/tests/script-with-bom.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+
+result = 'passed'
diff --git a/setuptools/tests/server.py b/setuptools/tests/server.py
new file mode 100644
index 0000000..3531212
--- /dev/null
+++ b/setuptools/tests/server.py
@@ -0,0 +1,72 @@
+"""Basic http server for tests to simulate PyPI or custom indexes
+"""
+
+import time
+import threading
+
+from setuptools.extern.six.moves import BaseHTTPServer, SimpleHTTPServer
+
+
+class IndexServer(BaseHTTPServer.HTTPServer):
+    """Basic single-threaded http server simulating a package index
+
+    You can use this server in unittest like this::
+        s = IndexServer()
+        s.start()
+        index_url = s.base_url() + 'mytestindex'
+        # do some test requests to the index
+        # The index files should be located in setuptools/tests/indexes
+        s.stop()
+    """
+
+    def __init__(self, server_address=('', 0),
+            RequestHandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler):
+        # Port 0 lets the OS pick a free port; see base_url().
+        BaseHTTPServer.HTTPServer.__init__(self, server_address,
+            RequestHandlerClass)
+        self._run = True
+
+    def start(self):
+        """Serve requests on a background thread until stop() is called."""
+        self.thread = threading.Thread(target=self.serve_forever)
+        self.thread.start()
+
+    def stop(self):
+        "Stop the server"
+
+        # Let the server finish the last request and wait for a new one.
+        time.sleep(0.1)
+
+        self.shutdown()
+        self.thread.join()
+        self.socket.close()
+
+    def base_url(self):
+        """Return the root URL of the test-index tree on this server."""
+        port = self.server_port
+        return 'http://127.0.0.1:%s/setuptools/tests/indexes/' % port
+
+
+class RequestRecorder(BaseHTTPServer.BaseHTTPRequestHandler):
+    """Handler that records each GET on the server and replies 200 OK."""
+    def do_GET(self):
+        # Append this handler instance to the server's request log.
+        requests = vars(self.server).setdefault('requests', [])
+        requests.append(self)
+        self.send_response(200, 'OK')
+
+
+class MockServer(BaseHTTPServer.HTTPServer, threading.Thread):
+    """
+    A simple HTTP Server that records the requests made to it.
+
+    The server is itself a daemon Thread; call start() to begin serving.
+    """
+
+    def __init__(self, server_address=('', 0),
+            RequestHandlerClass=RequestRecorder):
+        BaseHTTPServer.HTTPServer.__init__(self, server_address,
+            RequestHandlerClass)
+        threading.Thread.__init__(self)
+        # Daemon thread so a forgotten server does not block interpreter exit.
+        self.setDaemon(True)
+        self.requests = []
+
+    def run(self):
+        self.serve_forever()
+
+    @property
+    def url(self):
+        # server_port is an attribute of HTTPServer, exposed via vars().
+        return 'http://localhost:%(server_port)s/' % vars(self)
diff --git a/setuptools/tests/test_archive_util.py b/setuptools/tests/test_archive_util.py
new file mode 100644
index 0000000..b789e9a
--- /dev/null
+++ b/setuptools/tests/test_archive_util.py
@@ -0,0 +1,42 @@
+# coding: utf-8
+
+import tarfile
+import io
+
+from setuptools.extern import six
+
+import pytest
+
+from setuptools import archive_util
+
+
+@pytest.fixture
+def tarfile_with_unicode(tmpdir):
+    """
+    Create a tarfile containing only a file whose name is
+    a zero byte file called testimäge.png.
+
+    Returns the path of the written .tar.gz as a str.
+    """
+    tarobj = io.BytesIO()
+
+    with tarfile.open(fileobj=tarobj, mode="w:gz") as tgz:
+        data = b""
+
+        filename = "testimäge.png"
+        if six.PY2:
+            # On Python 2 the literal above is bytes; decode to unicode.
+            filename = filename.decode('utf-8')
+
+        t = tarfile.TarInfo(filename)
+        t.size = len(data)
+
+        tgz.addfile(t, io.BytesIO(data))
+
+    target = tmpdir / 'unicode-pkg-1.0.tar.gz'
+    with open(str(target), mode='wb') as tf:
+        tf.write(tarobj.getvalue())
+    return str(target)
+
+
+# Known failure: see setuptools issues #710 and #712 (unicode names in
+# unpack_archive).
+@pytest.mark.xfail(reason="#710 and #712")
+def test_unicode_files(tarfile_with_unicode, tmpdir):
+    target = tmpdir / 'out'
+    archive_util.unpack_archive(tarfile_with_unicode, six.text_type(target))
diff --git a/setuptools/tests/test_bdist_egg.py b/setuptools/tests/test_bdist_egg.py
new file mode 100644
index 0000000..54742aa
--- /dev/null
+++ b/setuptools/tests/test_bdist_egg.py
@@ -0,0 +1,66 @@
+"""develop tests
+"""
+import os
+import re
+import zipfile
+
+import pytest
+
+from setuptools.dist import Distribution
+
+from . import contexts
+
+SETUP_PY = """\
+from setuptools import setup
+
+setup(name='foo', py_modules=['hi'])
+"""
+
+
+@pytest.fixture(scope='function')
+def setup_context(tmpdir):
+    """Create a minimal project (setup.py + hi.py) and cd into it."""
+    with (tmpdir / 'setup.py').open('w') as f:
+        f.write(SETUP_PY)
+    with (tmpdir / 'hi.py').open('w') as f:
+        f.write('1\n')
+    with tmpdir.as_cwd():
+        yield tmpdir
+
+
+class Test:
+    def test_bdist_egg(self, setup_context, user_override):
+        """bdist_egg produces a single, correctly-named egg in dist/."""
+        dist = Distribution(dict(
+            script_name='setup.py',
+            script_args=['bdist_egg'],
+            name='foo',
+            py_modules=['hi'],
+        ))
+        os.makedirs(os.path.join('build', 'src'))
+        with contexts.quiet():
+            dist.parse_command_line()
+            dist.run_commands()
+
+        # let's see if we got our egg link at the right place
+        [content] = os.listdir('dist')
+        assert re.match(r'foo-0.0.0-py[23].\d.egg$', content)
+
+    @pytest.mark.xfail(
+        os.environ.get('PYTHONDONTWRITEBYTECODE'),
+        reason="Byte code disabled",
+    )
+    def test_exclude_source_files(self, setup_context, user_override):
+        """--exclude-source-files ships .pyc files but no .py sources."""
+        dist = Distribution(dict(
+            script_name='setup.py',
+            script_args=['bdist_egg', '--exclude-source-files'],
+            name='foo',
+            py_modules=['hi'],
+        ))
+        with contexts.quiet():
+            dist.parse_command_line()
+            dist.run_commands()
+        [dist_name] = os.listdir('dist')
+        dist_filename = os.path.join('dist', dist_name)
+        zip = zipfile.ZipFile(dist_filename)
+        names = list(zi.filename for zi in zip.filelist)
+        assert 'hi.pyc' in names
+        assert 'hi.py' not in names
diff --git a/setuptools/tests/test_build_clib.py b/setuptools/tests/test_build_clib.py
new file mode 100644
index 0000000..aebcc35
--- /dev/null
+++ b/setuptools/tests/test_build_clib.py
@@ -0,0 +1,59 @@
+import pytest
+import os
+import shutil
+
+import mock
+from distutils.errors import DistutilsSetupError
+from setuptools.command.build_clib import build_clib
+from setuptools.dist import Distribution
+
+
+class TestBuildCLib:
+    @mock.patch(
+            'setuptools.command.build_clib.newer_pairwise_group'
+            )
+    def test_build_libraries(self, mock_newer):
+        """Validate error paths and the obj_deps rebuild logic."""
+        dist = Distribution()
+        cmd = build_clib(dist)
+
+        # this will be a long section, just making sure all
+        # exceptions are properly raised
+        # 'sources' must be a list, not a string
+        libs = [('example', {'sources': 'broken.c'})]
+        with pytest.raises(DistutilsSetupError):
+            cmd.build_libraries(libs)
+
+        # obj_deps must be a dict
+        obj_deps = 'some_string'
+        libs = [('example', {'sources': ['source.c'], 'obj_deps': obj_deps})]
+        with pytest.raises(DistutilsSetupError):
+            cmd.build_libraries(libs)
+
+        # dependency values must be lists/tuples, not strings
+        obj_deps = {'': ''}
+        libs = [('example', {'sources': ['source.c'], 'obj_deps': obj_deps})]
+        with pytest.raises(DistutilsSetupError):
+            cmd.build_libraries(libs)
+
+        obj_deps = {'source.c': ''}
+        libs = [('example', {'sources': ['source.c'], 'obj_deps': obj_deps})]
+        with pytest.raises(DistutilsSetupError):
+            cmd.build_libraries(libs)
+
+        # with that out of the way, let's see if the crude dependency
+        # system works
+        cmd.compiler = mock.MagicMock(spec=cmd.compiler)
+        # ([], []) means "nothing newer" -> no recompilation expected
+        mock_newer.return_value = ([],[])
+
+        obj_deps = {'': ('global.h',), 'example.c': ('example.h',)}
+        libs = [('example', {'sources': ['example.c'] ,'obj_deps': obj_deps})]
+
+        cmd.build_libraries(libs)
+        assert [['example.c', 'global.h', 'example.h']] in mock_newer.call_args[0]
+        assert not cmd.compiler.compile.called
+        assert cmd.compiler.create_static_lib.call_count == 1
+
+        # reset the call numbers so we can test again
+        cmd.compiler.reset_mock()
+
+        mock_newer.return_value = ''  # anything as long as it's not ([],[])
+        cmd.build_libraries(libs)
+        assert cmd.compiler.compile.call_count == 1
+        assert cmd.compiler.create_static_lib.call_count == 1
diff --git a/setuptools/tests/test_build_ext.py b/setuptools/tests/test_build_ext.py
new file mode 100644
index 0000000..6025715
--- /dev/null
+++ b/setuptools/tests/test_build_ext.py
@@ -0,0 +1,45 @@
+import sys
+import distutils.command.build_ext as orig
+from distutils.sysconfig import get_config_var
+
+from setuptools.extern import six
+
+from setuptools.command.build_ext import build_ext, get_abi3_suffix
+from setuptools.dist import Distribution
+from setuptools.extension import Extension
+
+
+class TestBuildExt:
+    def test_get_ext_filename(self):
+        """
+        Setuptools needs to give back the same
+        result as distutils, even if the fullname
+        is not in ext_map.
+        """
+        dist = Distribution()
+        cmd = build_ext(dist)
+        cmd.ext_map['foo/bar'] = ''
+        res = cmd.get_ext_filename('foo')
+        wanted = orig.build_ext.get_ext_filename(cmd, 'foo')
+        assert res == wanted
+
+    def test_abi3_filename(self):
+        """
+        Filename needs to be loadable by several versions
+        of Python 3 if 'is_abi3' is truthy on Extension()
+        """
+        print(get_abi3_suffix())
+
+        extension = Extension('spam.eggs', ['eggs.c'], py_limited_api=True)
+        dist = Distribution(dict(ext_modules=[extension]))
+        cmd = build_ext(dist)
+        cmd.finalize_options()
+        assert 'spam.eggs' in cmd.ext_map
+        res = cmd.get_ext_filename('spam.eggs')
+
+        # Expectations differ per platform/interpreter:
+        if six.PY2 or not get_abi3_suffix():
+            assert res.endswith(get_config_var('SO'))
+        elif sys.platform == 'win32':
+            assert res.endswith('eggs.pyd')
+        else:
+            assert 'abi3' in res
diff --git a/setuptools/tests/test_build_meta.py b/setuptools/tests/test_build_meta.py
new file mode 100644
index 0000000..659c1a6
--- /dev/null
+++ b/setuptools/tests/test_build_meta.py
@@ -0,0 +1,126 @@
+import os
+
+import pytest
+
+from .files import build_files
+from .textwrap import DALS
+
+
+futures = pytest.importorskip('concurrent.futures')
+importlib = pytest.importorskip('importlib')
+
+
+class BuildBackendBase(object):
+    def __init__(self, cwd=None, env={}, backend_name='setuptools.build_meta'):
+        self.cwd = cwd
+        self.env = env
+        self.backend_name = backend_name
+
+
+class BuildBackend(BuildBackendBase):
+    """PEP 517 Build Backend"""
+    def __init__(self, *args, **kwargs):
+        super(BuildBackend, self).__init__(*args, **kwargs)
+        self.pool = futures.ProcessPoolExecutor()
+
+    def __getattr__(self, name):
+        """Handles arbitrary function invocations on the build backend."""
+        def method(*args, **kw):
+            root = os.path.abspath(self.cwd)
+            caller = BuildBackendCaller(root, self.env, self.backend_name)
+            return self.pool.submit(caller, name, *args, **kw).result()
+
+        return method
+
+
+class BuildBackendCaller(BuildBackendBase):
+    def __call__(self, name, *args, **kw):
+        """Handles arbitrary function invocations on the build backend."""
+        os.chdir(self.cwd)
+        os.environ.update(self.env)
+        mod = importlib.import_module(self.backend_name)
+        return getattr(mod, name)(*args, **kw)
+
+
+defns = [{
+            'setup.py': DALS("""
+                __import__('setuptools').setup(
+                    name='foo',
+                    py_modules=['hello'],
+                    setup_requires=['six'],
+                )
+                """),
+            'hello.py': DALS("""
+                def run():
+                    print('hello')
+                """),
+        },
+        {
+            'setup.py': DALS("""
+                assert __name__ == '__main__'
+                __import__('setuptools').setup(
+                    name='foo',
+                    py_modules=['hello'],
+                    setup_requires=['six'],
+                )
+                """),
+            'hello.py': DALS("""
+                def run():
+                    print('hello')
+                """),
+        },
+        {
+            'setup.py': DALS("""
+                variable = True
+                def function():
+                    return variable
+                assert variable
+                __import__('setuptools').setup(
+                    name='foo',
+                    py_modules=['hello'],
+                    setup_requires=['six'],
+                )
+                """),
+            'hello.py': DALS("""
+                def run():
+                    print('hello')
+                """),
+        }]
+
+
+@pytest.fixture(params=defns)
+def build_backend(tmpdir, request):
+    build_files(request.param, prefix=str(tmpdir))
+    with tmpdir.as_cwd():
+        yield BuildBackend(cwd='.')
+
+
+def test_get_requires_for_build_wheel(build_backend):
+    actual = build_backend.get_requires_for_build_wheel()
+    expected = ['six', 'setuptools', 'wheel']
+    assert sorted(actual) == sorted(expected)
+
+
+def test_build_wheel(build_backend):
+    dist_dir = os.path.abspath('pip-wheel')
+    os.makedirs(dist_dir)
+    wheel_name = build_backend.build_wheel(dist_dir)
+
+    assert os.path.isfile(os.path.join(dist_dir, wheel_name))
+
+
+def test_build_sdist(build_backend):
+    dist_dir = os.path.abspath('pip-sdist')
+    os.makedirs(dist_dir)
+    sdist_name = build_backend.build_sdist(dist_dir)
+
+    assert os.path.isfile(os.path.join(dist_dir, sdist_name))
+
+
+def test_prepare_metadata_for_build_wheel(build_backend):
+    dist_dir = os.path.abspath('pip-dist-info')
+    os.makedirs(dist_dir)
+
+    dist_info = build_backend.prepare_metadata_for_build_wheel(dist_dir)
+
+    assert os.path.isfile(os.path.join(dist_dir, dist_info, 'METADATA'))
diff --git a/setuptools/tests/test_build_py.py b/setuptools/tests/test_build_py.py
new file mode 100644
index 0000000..cc701ae
--- /dev/null
+++ b/setuptools/tests/test_build_py.py
@@ -0,0 +1,30 @@
+import os
+
+import pytest
+
+from setuptools.dist import Distribution
+
+
+@pytest.yield_fixture
+def tmpdir_as_cwd(tmpdir):
+    with tmpdir.as_cwd():
+        yield tmpdir
+
+
+def test_directories_in_package_data_glob(tmpdir_as_cwd):
+    """
+    Directories matching the glob in package_data should
+    not be included in the package data.
+
+    Regression test for #261.
+    """
+    dist = Distribution(dict(
+        script_name='setup.py',
+        script_args=['build_py'],
+        packages=[''],
+        name='foo',
+        package_data={'': ['path/*']},
+    ))
+    os.makedirs('path/subpath')
+    dist.parse_command_line()
+    dist.run_commands()
diff --git a/setuptools/tests/test_config.py b/setuptools/tests/test_config.py
new file mode 100644
index 0000000..abb953a
--- /dev/null
+++ b/setuptools/tests/test_config.py
@@ -0,0 +1,583 @@
+import contextlib
+import pytest
+from distutils.errors import DistutilsOptionError, DistutilsFileError
+from setuptools.dist import Distribution
+from setuptools.config import ConfigHandler, read_configuration
+
+
+class ErrConfigHandler(ConfigHandler):
+    """Erroneous handler. Fails to implement required methods."""
+
+
+def make_package_dir(name, base_dir):
+    dir_package = base_dir.mkdir(name)
+    init_file = dir_package.join('__init__.py')
+    init_file.write('')
+    return dir_package, init_file
+
+
+def fake_env(tmpdir, setup_cfg, setup_py=None):
+
+    if setup_py is None:
+        setup_py = (
+            'from setuptools import setup\n'
+            'setup()\n'
+        )
+
+    tmpdir.join('setup.py').write(setup_py)
+    config = tmpdir.join('setup.cfg')
+    config.write(setup_cfg)
+
+    package_dir, init_file = make_package_dir('fake_package', tmpdir)
+
+    init_file.write(
+        'VERSION = (1, 2, 3)\n'
+        '\n'
+        'VERSION_MAJOR = 1'
+        '\n'
+        'def get_version():\n'
+        '    return [3, 4, 5, "dev"]\n'
+        '\n'
+    )
+    return package_dir, config
+
+
+@contextlib.contextmanager
+def get_dist(tmpdir, kwargs_initial=None, parse=True):
+    kwargs_initial = kwargs_initial or {}
+
+    with tmpdir.as_cwd():
+        dist = Distribution(kwargs_initial)
+        dist.script_name = 'setup.py'
+        parse and dist.parse_config_files()
+
+        yield dist
+
+
+def test_parsers_implemented():
+
+    with pytest.raises(NotImplementedError):
+        handler = ErrConfigHandler(None, {})
+        handler.parsers
+
+
+class TestConfigurationReader:
+
+    def test_basic(self, tmpdir):
+        _, config = fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'version = 10.1.1\n'
+            'keywords = one, two\n'
+            '\n'
+            '[options]\n'
+            'scripts = bin/a.py, bin/b.py\n'
+        )
+        config_dict = read_configuration('%s' % config)
+        assert config_dict['metadata']['version'] == '10.1.1'
+        assert config_dict['metadata']['keywords'] == ['one', 'two']
+        assert config_dict['options']['scripts'] == ['bin/a.py', 'bin/b.py']
+
+    def test_no_config(self, tmpdir):
+        with pytest.raises(DistutilsFileError):
+            read_configuration('%s' % tmpdir.join('setup.cfg'))
+
+    def test_ignore_errors(self, tmpdir):
+        _, config = fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'version = attr: none.VERSION\n'
+            'keywords = one, two\n'
+        )
+        with pytest.raises(ImportError):
+            read_configuration('%s' % config)
+
+        config_dict = read_configuration(
+            '%s' % config, ignore_option_errors=True)
+
+        assert config_dict['metadata']['keywords'] == ['one', 'two']
+        assert 'version' not in config_dict['metadata']
+
+        config.remove()
+
+
+class TestMetadata:
+
+    def test_basic(self, tmpdir):
+
+        fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'version = 10.1.1\n'
+            'description = Some description\n'
+            'long_description_content_type = text/something\n'
+            'long_description = file: README\n'
+            'name = fake_name\n'
+            'keywords = one, two\n'
+            'provides = package, package.sub\n'
+            'license = otherlic\n'
+            'download_url = http://test.test.com/test/\n'
+            'maintainer_email = test@test.com\n'
+        )
+
+        tmpdir.join('README').write('readme contents\nline2')
+
+        meta_initial = {
+            # This will be used so `otherlic` won't replace it.
+            'license': 'BSD 3-Clause License',
+        }
+
+        with get_dist(tmpdir, meta_initial) as dist:
+            metadata = dist.metadata
+
+            assert metadata.version == '10.1.1'
+            assert metadata.description == 'Some description'
+            assert metadata.long_description_content_type == 'text/something'
+            assert metadata.long_description == 'readme contents\nline2'
+            assert metadata.provides == ['package', 'package.sub']
+            assert metadata.license == 'BSD 3-Clause License'
+            assert metadata.name == 'fake_name'
+            assert metadata.keywords == ['one', 'two']
+            assert metadata.download_url == 'http://test.test.com/test/'
+            assert metadata.maintainer_email == 'test@test.com'
+
+    def test_file_mixed(self, tmpdir):
+
+        fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'long_description = file: README.rst, CHANGES.rst\n'
+            '\n'
+        )
+
+        tmpdir.join('README.rst').write('readme contents\nline2')
+        tmpdir.join('CHANGES.rst').write('changelog contents\nand stuff')
+
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.long_description == (
+                'readme contents\nline2\n'
+                'changelog contents\nand stuff'
+            )
+
+    def test_file_sandboxed(self, tmpdir):
+
+        fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'long_description = file: ../../README\n'
+        )
+
+        with get_dist(tmpdir, parse=False) as dist:
+            with pytest.raises(DistutilsOptionError):
+                dist.parse_config_files()  # file: out of sandbox
+
+    def test_aliases(self, tmpdir):
+
+        fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'author-email = test@test.com\n'
+            'home-page = http://test.test.com/test/\n'
+            'summary = Short summary\n'
+            'platform = a, b\n'
+            'classifier =\n'
+            '  Framework :: Django\n'
+            '  Programming Language :: Python :: 3.5\n'
+        )
+
+        with get_dist(tmpdir) as dist:
+            metadata = dist.metadata
+            assert metadata.author_email == 'test@test.com'
+            assert metadata.url == 'http://test.test.com/test/'
+            assert metadata.description == 'Short summary'
+            assert metadata.platforms == ['a', 'b']
+            assert metadata.classifiers == [
+                'Framework :: Django',
+                'Programming Language :: Python :: 3.5',
+            ]
+
+    def test_multiline(self, tmpdir):
+
+        fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'name = fake_name\n'
+            'keywords =\n'
+            '  one\n'
+            '  two\n'
+            'classifiers =\n'
+            '  Framework :: Django\n'
+            '  Programming Language :: Python :: 3.5\n'
+        )
+        with get_dist(tmpdir) as dist:
+            metadata = dist.metadata
+            assert metadata.keywords == ['one', 'two']
+            assert metadata.classifiers == [
+                'Framework :: Django',
+                'Programming Language :: Python :: 3.5',
+            ]
+
+    def test_dict(self, tmpdir):
+
+        fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'project_urls =\n'
+            '  Link One = https://example.com/one/\n'
+            '  Link Two = https://example.com/two/\n'
+        )
+        with get_dist(tmpdir) as dist:
+            metadata = dist.metadata
+            assert metadata.project_urls == {
+                'Link One': 'https://example.com/one/',
+                'Link Two': 'https://example.com/two/',
+            }
+
+    def test_version(self, tmpdir):
+
+        _, config = fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'version = attr: fake_package.VERSION\n'
+        )
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.version == '1.2.3'
+
+        config.write(
+            '[metadata]\n'
+            'version = attr: fake_package.get_version\n'
+        )
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.version == '3.4.5.dev'
+
+        config.write(
+            '[metadata]\n'
+            'version = attr: fake_package.VERSION_MAJOR\n'
+        )
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.version == '1'
+
+        subpack = tmpdir.join('fake_package').mkdir('subpackage')
+        subpack.join('__init__.py').write('')
+        subpack.join('submodule.py').write('VERSION = (2016, 11, 26)')
+
+        config.write(
+            '[metadata]\n'
+            'version = attr: fake_package.subpackage.submodule.VERSION\n'
+        )
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.version == '2016.11.26'
+
+    def test_unknown_meta_item(self, tmpdir):
+
+        fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'name = fake_name\n'
+            'unknown = some\n'
+        )
+        with get_dist(tmpdir, parse=False) as dist:
+            dist.parse_config_files()  # Skip unknown.
+
+    def test_usupported_section(self, tmpdir):
+
+        fake_env(
+            tmpdir,
+            '[metadata.some]\n'
+            'key = val\n'
+        )
+        with get_dist(tmpdir, parse=False) as dist:
+            with pytest.raises(DistutilsOptionError):
+                dist.parse_config_files()
+
+    def test_classifiers(self, tmpdir):
+        expected = set([
+            'Framework :: Django',
+            'Programming Language :: Python :: 3',
+            'Programming Language :: Python :: 3.5',
+        ])
+
+        # From file.
+        _, config = fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'classifiers = file: classifiers\n'
+        )
+
+        tmpdir.join('classifiers').write(
+            'Framework :: Django\n'
+            'Programming Language :: Python :: 3\n'
+            'Programming Language :: Python :: 3.5\n'
+        )
+
+        with get_dist(tmpdir) as dist:
+            assert set(dist.metadata.classifiers) == expected
+
+        # From list notation
+        config.write(
+            '[metadata]\n'
+            'classifiers =\n'
+            '    Framework :: Django\n'
+            '    Programming Language :: Python :: 3\n'
+            '    Programming Language :: Python :: 3.5\n'
+        )
+        with get_dist(tmpdir) as dist:
+            assert set(dist.metadata.classifiers) == expected
+
+
+class TestOptions:
+
+    def test_basic(self, tmpdir):
+
+        fake_env(
+            tmpdir,
+            '[options]\n'
+            'zip_safe = True\n'
+            'use_2to3 = 1\n'
+            'include_package_data = yes\n'
+            'package_dir = b=c, =src\n'
+            'packages = pack_a, pack_b.subpack\n'
+            'namespace_packages = pack1, pack2\n'
+            'use_2to3_fixers = your.fixers, or.here\n'
+            'use_2to3_exclude_fixers = one.here, two.there\n'
+            'convert_2to3_doctests = src/tests/one.txt, src/two.txt\n'
+            'scripts = bin/one.py, bin/two.py\n'
+            'eager_resources = bin/one.py, bin/two.py\n'
+            'install_requires = docutils>=0.3; pack ==1.1, ==1.3; hey\n'
+            'tests_require = mock==0.7.2; pytest\n'
+            'setup_requires = docutils>=0.3; spack ==1.1, ==1.3; there\n'
+            'dependency_links = http://some.com/here/1, '
+                'http://some.com/there/2\n'
+            'python_requires = >=1.0, !=2.8\n'
+            'py_modules = module1, module2\n'
+        )
+        with get_dist(tmpdir) as dist:
+            assert dist.zip_safe
+            assert dist.use_2to3
+            assert dist.include_package_data
+            assert dist.package_dir == {'': 'src', 'b': 'c'}
+            assert dist.packages == ['pack_a', 'pack_b.subpack']
+            assert dist.namespace_packages == ['pack1', 'pack2']
+            assert dist.use_2to3_fixers == ['your.fixers', 'or.here']
+            assert dist.use_2to3_exclude_fixers == ['one.here', 'two.there']
+            assert dist.convert_2to3_doctests == ([
+                'src/tests/one.txt', 'src/two.txt'])
+            assert dist.scripts == ['bin/one.py', 'bin/two.py']
+            assert dist.dependency_links == ([
+                'http://some.com/here/1',
+                'http://some.com/there/2'
+            ])
+            assert dist.install_requires == ([
+                'docutils>=0.3',
+                'pack==1.1,==1.3',
+                'hey'
+            ])
+            assert dist.setup_requires == ([
+                'docutils>=0.3',
+                'spack ==1.1, ==1.3',
+                'there'
+            ])
+            assert dist.tests_require == ['mock==0.7.2', 'pytest']
+            assert dist.python_requires == '>=1.0, !=2.8'
+            assert dist.py_modules == ['module1', 'module2']
+
+    def test_multiline(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[options]\n'
+            'package_dir = \n'
+            '  b=c\n'
+            '  =src\n'
+            'packages = \n'
+            '  pack_a\n'
+            '  pack_b.subpack\n'
+            'namespace_packages = \n'
+            '  pack1\n'
+            '  pack2\n'
+            'use_2to3_fixers = \n'
+            '  your.fixers\n'
+            '  or.here\n'
+            'use_2to3_exclude_fixers = \n'
+            '  one.here\n'
+            '  two.there\n'
+            'convert_2to3_doctests = \n'
+            '  src/tests/one.txt\n'
+            '  src/two.txt\n'
+            'scripts = \n'
+            '  bin/one.py\n'
+            '  bin/two.py\n'
+            'eager_resources = \n'
+            '  bin/one.py\n'
+            '  bin/two.py\n'
+            'install_requires = \n'
+            '  docutils>=0.3\n'
+            '  pack ==1.1, ==1.3\n'
+            '  hey\n'
+            'tests_require = \n'
+            '  mock==0.7.2\n'
+            '  pytest\n'
+            'setup_requires = \n'
+            '  docutils>=0.3\n'
+            '  spack ==1.1, ==1.3\n'
+            '  there\n'
+            'dependency_links = \n'
+            '  http://some.com/here/1\n'
+            '  http://some.com/there/2\n'
+        )
+        with get_dist(tmpdir) as dist:
+            assert dist.package_dir == {'': 'src', 'b': 'c'}
+            assert dist.packages == ['pack_a', 'pack_b.subpack']
+            assert dist.namespace_packages == ['pack1', 'pack2']
+            assert dist.use_2to3_fixers == ['your.fixers', 'or.here']
+            assert dist.use_2to3_exclude_fixers == ['one.here', 'two.there']
+            assert dist.convert_2to3_doctests == (
+                ['src/tests/one.txt', 'src/two.txt'])
+            assert dist.scripts == ['bin/one.py', 'bin/two.py']
+            assert dist.dependency_links == ([
+                'http://some.com/here/1',
+                'http://some.com/there/2'
+            ])
+            assert dist.install_requires == ([
+                'docutils>=0.3',
+                'pack==1.1,==1.3',
+                'hey'
+            ])
+            assert dist.setup_requires == ([
+                'docutils>=0.3',
+                'spack ==1.1, ==1.3',
+                'there'
+            ])
+            assert dist.tests_require == ['mock==0.7.2', 'pytest']
+
+    def test_package_dir_fail(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[options]\n'
+            'package_dir = a b\n'
+        )
+        with get_dist(tmpdir, parse=False) as dist:
+            with pytest.raises(DistutilsOptionError):
+                dist.parse_config_files()
+
+    def test_package_data(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[options.package_data]\n'
+            '* = *.txt, *.rst\n'
+            'hello = *.msg\n'
+            '\n'
+            '[options.exclude_package_data]\n'
+            '* = fake1.txt, fake2.txt\n'
+            'hello = *.dat\n'
+        )
+
+        with get_dist(tmpdir) as dist:
+            assert dist.package_data == {
+                '': ['*.txt', '*.rst'],
+                'hello': ['*.msg'],
+            }
+            assert dist.exclude_package_data == {
+                '': ['fake1.txt', 'fake2.txt'],
+                'hello': ['*.dat'],
+            }
+
+    def test_packages(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[options]\n'
+            'packages = find:\n'
+        )
+
+        with get_dist(tmpdir) as dist:
+            assert dist.packages == ['fake_package']
+
+    def test_find_directive(self, tmpdir):
+        dir_package, config = fake_env(
+            tmpdir,
+            '[options]\n'
+            'packages = find:\n'
+        )
+
+        dir_sub_one, _ = make_package_dir('sub_one', dir_package)
+        dir_sub_two, _ = make_package_dir('sub_two', dir_package)
+
+        with get_dist(tmpdir) as dist:
+            assert set(dist.packages) == set([
+                'fake_package', 'fake_package.sub_two', 'fake_package.sub_one'
+            ])
+
+        config.write(
+            '[options]\n'
+            'packages = find:\n'
+            '\n'
+            '[options.packages.find]\n'
+            'where = .\n'
+            'include =\n'
+            '    fake_package.sub_one\n'
+            '    two\n'
+        )
+        with get_dist(tmpdir) as dist:
+            assert dist.packages == ['fake_package.sub_one']
+
+        config.write(
+            '[options]\n'
+            'packages = find:\n'
+            '\n'
+            '[options.packages.find]\n'
+            'exclude =\n'
+            '    fake_package.sub_one\n'
+        )
+        with get_dist(tmpdir) as dist:
+            assert set(dist.packages) == set(
+                ['fake_package',  'fake_package.sub_two'])
+
+    def test_extras_require(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[options.extras_require]\n'
+            'pdf = ReportLab>=1.2; RXP\n'
+            'rest = \n'
+            '  docutils>=0.3\n'
+            '  pack ==1.1, ==1.3\n'
+        )
+
+        with get_dist(tmpdir) as dist:
+            assert dist.extras_require == {
+                'pdf': ['ReportLab>=1.2', 'RXP'],
+                'rest': ['docutils>=0.3', 'pack==1.1,==1.3']
+            }
+            assert dist.metadata.provides_extras == set(['pdf', 'rest'])
+
+    def test_entry_points(self, tmpdir):
+        _, config = fake_env(
+            tmpdir,
+            '[options.entry_points]\n'
+            'group1 = point1 = pack.module:func, '
+                '.point2 = pack.module2:func_rest [rest]\n'
+            'group2 = point3 = pack.module:func2\n'
+        )
+
+        with get_dist(tmpdir) as dist:
+            assert dist.entry_points == {
+                'group1': [
+                    'point1 = pack.module:func',
+                    '.point2 = pack.module2:func_rest [rest]',
+                ],
+                'group2': ['point3 = pack.module:func2']
+            }
+
+        expected = (
+            '[blogtool.parsers]\n'
+            '.rst = some.nested.module:SomeClass.some_classmethod[reST]\n'
+        )
+
+        tmpdir.join('entry_points').write(expected)
+
+        # From file.
+        config.write(
+            '[options]\n'
+            'entry_points = file: entry_points\n'
+        )
+
+        with get_dist(tmpdir) as dist:
+            assert dist.entry_points == expected
diff --git a/setuptools/tests/test_dep_util.py b/setuptools/tests/test_dep_util.py
new file mode 100644
index 0000000..e5027c1
--- /dev/null
+++ b/setuptools/tests/test_dep_util.py
@@ -0,0 +1,30 @@
+from setuptools.dep_util import newer_pairwise_group
+import os
+import pytest
+
+
+@pytest.fixture
+def groups_target(tmpdir):
+    """Sets up some older sources, a target and newer sources.
+    Returns a 3-tuple in this order.
+    """
+    creation_order = ['older.c', 'older.h', 'target.o', 'newer.c', 'newer.h']
+    mtime = 0
+
+    for i in range(len(creation_order)):
+        creation_order[i] = os.path.join(str(tmpdir), creation_order[i])
+        with open(creation_order[i], 'w'):
+            pass
+
+        # make sure modification times are sequential
+        os.utime(creation_order[i], (mtime, mtime))
+        mtime += 1
+
+    return creation_order[:2], creation_order[2], creation_order[3:]
+
+
+def test_newer_pairwise_group(groups_target):
+    older = newer_pairwise_group([groups_target[0]], [groups_target[1]])
+    newer = newer_pairwise_group([groups_target[2]], [groups_target[1]])
+    assert older == ([], [])
+    assert newer == ([groups_target[2]], [groups_target[1]])
diff --git a/setuptools/tests/test_depends.py b/setuptools/tests/test_depends.py
new file mode 100644
index 0000000..e0cfa88
--- /dev/null
+++ b/setuptools/tests/test_depends.py
@@ -0,0 +1,16 @@
+import sys
+
+from setuptools import depends
+
+
+class TestGetModuleConstant:
+
+	def test_basic(self):
+		"""
+		Invoke get_module_constant on a module in
+		the test package.
+		"""
+		mod_name = 'setuptools.tests.mod_with_constant'
+		val = depends.get_module_constant(mod_name, 'value')
+		assert val == 'three, sir!'
+		assert 'setuptools.tests.mod_with_constant' not in sys.modules
diff --git a/setuptools/tests/test_develop.py b/setuptools/tests/test_develop.py
new file mode 100644
index 0000000..00d4bd9
--- /dev/null
+++ b/setuptools/tests/test_develop.py
@@ -0,0 +1,202 @@
+"""develop tests
+"""
+
+from __future__ import absolute_import, unicode_literals
+
+import os
+import site
+import sys
+import io
+import subprocess
+import platform
+
+from setuptools.extern import six
+from setuptools.command import test
+
+import pytest
+
+from setuptools.command.develop import develop
+from setuptools.dist import Distribution
+from . import contexts
+from . import namespaces
+
+SETUP_PY = """\
+from setuptools import setup
+
+setup(name='foo',
+    packages=['foo'],
+    use_2to3=True,
+)
+"""
+
+INIT_PY = """print "foo"
+"""
+
+
+@pytest.yield_fixture
+def temp_user(monkeypatch):
+    with contexts.tempdir() as user_base:
+        with contexts.tempdir() as user_site:
+            monkeypatch.setattr('site.USER_BASE', user_base)
+            monkeypatch.setattr('site.USER_SITE', user_site)
+            yield
+
+
+@pytest.yield_fixture
+def test_env(tmpdir, temp_user):
+    target = tmpdir
+    foo = target.mkdir('foo')
+    setup = target / 'setup.py'
+    if setup.isfile():
+        raise ValueError(dir(target))
+    with setup.open('w') as f:
+        f.write(SETUP_PY)
+    init = foo / '__init__.py'
+    with init.open('w') as f:
+        f.write(INIT_PY)
+    with target.as_cwd():
+        yield target
+
+
+class TestDevelop:
+    in_virtualenv = hasattr(sys, 'real_prefix')
+    in_venv = hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix
+
+    @pytest.mark.skipif(
+        in_virtualenv or in_venv,
+        reason="Cannot run when invoked in a virtualenv or venv")
+    def test_2to3_user_mode(self, test_env):
+        settings = dict(
+            name='foo',
+            packages=['foo'],
+            use_2to3=True,
+            version='0.0',
+        )
+        dist = Distribution(settings)
+        dist.script_name = 'setup.py'
+        cmd = develop(dist)
+        cmd.user = 1
+        cmd.ensure_finalized()
+        cmd.install_dir = site.USER_SITE
+        cmd.user = 1
+        with contexts.quiet():
+            cmd.run()
+
+        # let's see if we got our egg link at the right place
+        content = os.listdir(site.USER_SITE)
+        content.sort()
+        assert content == ['easy-install.pth', 'foo.egg-link']
+
+        # Check that we are using the right code.
+        fn = os.path.join(site.USER_SITE, 'foo.egg-link')
+        with io.open(fn) as egg_link_file:
+            path = egg_link_file.read().split()[0].strip()
+        fn = os.path.join(path, 'foo', '__init__.py')
+        with io.open(fn) as init_file:
+            init = init_file.read().strip()
+
+        expected = 'print("foo")' if six.PY3 else 'print "foo"'
+        assert init == expected
+
+    def test_console_scripts(self, tmpdir):
+        """
+        Test that console scripts are installed and that they reference
+        only the project by name and not the current version.
+        """
+        pytest.skip(
+            "TODO: needs a fixture to cause 'develop' "
+            "to be invoked without mutating environment.")
+        settings = dict(
+            name='foo',
+            packages=['foo'],
+            version='0.0',
+            entry_points={
+                'console_scripts': [
+                    'foocmd = foo:foo',
+                ],
+            },
+        )
+        dist = Distribution(settings)
+        dist.script_name = 'setup.py'
+        cmd = develop(dist)
+        cmd.ensure_finalized()
+        cmd.install_dir = tmpdir
+        cmd.run()
+        # assert '0.0' not in foocmd_text
+
+
+class TestResolver:
+    """
+    TODO: These tests were written with a minimal understanding
+    of what _resolve_setup_path is intending to do. Come up with
+    more meaningful cases that look like real-world scenarios.
+    """
+    def test_resolve_setup_path_cwd(self):
+        assert develop._resolve_setup_path('.', '.', '.') == '.'
+
+    def test_resolve_setup_path_one_dir(self):
+        assert develop._resolve_setup_path('pkgs', '.', 'pkgs') == '../'
+
+    def test_resolve_setup_path_one_dir_trailing_slash(self):
+        assert develop._resolve_setup_path('pkgs/', '.', 'pkgs') == '../'
+
+
+class TestNamespaces:
+
+    @staticmethod
+    def install_develop(src_dir, target):
+
+        develop_cmd = [
+            sys.executable,
+            'setup.py',
+            'develop',
+            '--install-dir', str(target),
+        ]
+        with src_dir.as_cwd():
+            with test.test.paths_on_pythonpath([str(target)]):
+                subprocess.check_call(develop_cmd)
+
+    @pytest.mark.skipif(
+        bool(os.environ.get("APPVEYOR")),
+        reason="https://github.com/pypa/setuptools/issues/851",
+    )
+    @pytest.mark.skipif(
+        platform.python_implementation() == 'PyPy' and six.PY3,
+        reason="https://github.com/pypa/setuptools/issues/1202",
+    )
+    def test_namespace_package_importable(self, tmpdir):
+        """
+        Installing two packages sharing the same namespace, one installed
+        naturally using pip or `--single-version-externally-managed`
+        and the other installed using `develop` should leave the namespace
+        intact and both packages reachable by import.
+        """
+        pkg_A = namespaces.build_namespace_package(tmpdir, 'myns.pkgA')
+        pkg_B = namespaces.build_namespace_package(tmpdir, 'myns.pkgB')
+        target = tmpdir / 'packages'
+        # use pip to install to the target directory
+        install_cmd = [
+            sys.executable,
+            '-m',
+            'pip',
+            'install',
+            str(pkg_A),
+            '-t', str(target),
+        ]
+        subprocess.check_call(install_cmd)
+        self.install_develop(pkg_B, target)
+        namespaces.make_site_dir(target)
+        try_import = [
+            sys.executable,
+            '-c', 'import myns.pkgA; import myns.pkgB',
+        ]
+        with test.test.paths_on_pythonpath([str(target)]):
+            subprocess.check_call(try_import)
+
+        # additionally ensure that pkg_resources import works
+        pkg_resources_imp = [
+            sys.executable,
+            '-c', 'import pkg_resources',
+        ]
+        with test.test.paths_on_pythonpath([str(target)]):
+            subprocess.check_call(pkg_resources_imp)
diff --git a/setuptools/tests/test_dist.py b/setuptools/tests/test_dist.py
new file mode 100644
index 0000000..5162e1c
--- /dev/null
+++ b/setuptools/tests/test_dist.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import unicode_literals
+
+import io
+
+from setuptools import Distribution
+from setuptools.extern.six.moves.urllib.request import pathname2url
+from setuptools.extern.six.moves.urllib_parse import urljoin
+
+from .textwrap import DALS
+from .test_easy_install import make_nspkg_sdist
+
+import pytest
+
+
def test_dist_fetch_build_egg(tmpdir):
    """
    Check multiple calls to `Distribution.fetch_build_egg` work as expected.
    """
    index = tmpdir.mkdir('index')
    index_url = urljoin('file://', pathname2url(str(index)))

    def sdist_with_index(distname, version):
        # Build an sdist for distname and publish it under the local
        # file:// index via a minimal PyPI-style index.html page.
        dist_dir = index.mkdir(distname)
        dist_sdist = '%s-%s.tar.gz' % (distname, version)
        make_nspkg_sdist(str(dist_dir.join(dist_sdist)), distname, version)
        with dist_dir.join('index.html').open('w') as fp:
            fp.write(DALS(
                '''
                <!DOCTYPE html><html><body>
                <a href="{dist_sdist}" rel="internal">{dist_sdist}</a><br/>
                </body></html>
                '''
            ).format(dist_sdist=dist_sdist))
    sdist_with_index('barbazquux', '3.2.0')
    sdist_with_index('barbazquux-runner', '2.11.1')
    # Point easy_install's resolution at the local index via setup.cfg.
    with tmpdir.join('setup.cfg').open('w') as fp:
        fp.write(DALS(
            '''
            [easy_install]
            index_url = {index_url}
            '''
        ).format(index_url=index_url))
    reqs = '''
    barbazquux-runner
    barbazquux
    '''.split()
    with tmpdir.as_cwd():
        dist = Distribution()
        dist.parse_config_files()
        # Fetch each requirement in turn; every call should resolve
        # against the same configured local index.
        resolved_dists = [
            dist.fetch_build_egg(r)
            for r in reqs
        ]
    assert [dist.key for dist in resolved_dists if dist] == reqs
+
+
def __maintainer_test_cases():
    """Return (label, attrs) pairs covering author/maintainer combinations."""
    base_attrs = {
        "name": "package",
        "version": "1.0",
        "description": "xxx",
    }

    def case(label, extra):
        # Each case gets its own fresh copy of the base metadata,
        # updated with the case-specific attributes.
        combined = dict(base_attrs)
        combined.update(extra)
        return label, combined

    return [
        case('No author, no maintainer', {}),
        case('Author (no e-mail), no maintainer',
             {'author': 'Author Name'}),
        case('Author (e-mail), no maintainer',
             {'author': 'Author Name',
              'author_email': 'author@name.com'}),
        case('No author, maintainer (no e-mail)',
             {'maintainer': 'Maintainer Name'}),
        case('No author, maintainer (e-mail)',
             {'maintainer': 'Maintainer Name',
              'maintainer_email': 'maintainer@name.com'}),
        case('Author (no e-mail), Maintainer (no-email)',
             {'author': 'Author Name',
              'maintainer': 'Maintainer Name'}),
        case('Author (e-mail), Maintainer (e-mail)',
             {'author': 'Author Name',
              'author_email': 'author@name.com',
              'maintainer': 'Maintainer Name',
              'maintainer_email': 'maintainer@name.com'}),
        case('No author (e-mail), no maintainer (e-mail)',
             {'author_email': 'author@name.com',
              'maintainer_email': 'maintainer@name.com'}),
        case('Author unicode', {'author': '鉄沢寛'}),
        case('Maintainer unicode', {'maintainer': 'Jan Łukasiewicz'}),
    ]
+
+
@pytest.mark.parametrize('name,attrs', __maintainer_test_cases())
def test_maintainer_author(name, attrs, tmpdir):
    """
    Write PKG-INFO for each author/maintainer combination and verify the
    corresponding fields appear exactly when the attribute is set.

    Bug fix: the original looked up ``attrs.get(dkey)`` where ``dkey`` is
    the PKG-INFO field name (e.g. 'Author'), but the test-case dicts are
    keyed by the setup() keyword (e.g. 'author'); the lookup therefore
    always returned None, making the positive assertions dead code and the
    negative assertions match the wrong (lowercase) prefix.  The mapping
    is keyword -> PKG-INFO field; look up by keyword, match by field name.
    """
    # setup() keyword -> PKG-INFO field name
    tested_keys = {
        'author': 'Author',
        'author_email': 'Author-email',
        'maintainer': 'Maintainer',
        'maintainer_email': 'Maintainer-email',
    }

    # Generate a PKG-INFO file
    dist = Distribution(attrs)
    fn = tmpdir.mkdir('pkg_info')
    fn_s = str(fn)

    dist.metadata.write_pkg_info(fn_s)

    with io.open(str(fn.join('PKG-INFO')), 'r', encoding='utf-8') as f:
        raw_pkg_lines = f.readlines()

    # Drop blank lines
    pkg_lines = list(filter(None, raw_pkg_lines))

    pkg_lines_set = set(pkg_lines)

    # Duplicate lines should not be generated
    assert len(pkg_lines) == len(pkg_lines_set)

    for attr_key, field_name in tested_keys.items():
        val = attrs.get(attr_key, None)
        if val is None:
            # Unset attributes must not appear in the generated metadata.
            for line in pkg_lines:
                assert not line.startswith(field_name + ':')
        else:
            # Set attributes must appear verbatim as 'Field: value'.
            line = '%s: %s' % (field_name, val)
            assert line in pkg_lines_set
diff --git a/setuptools/tests/test_dist_info.py b/setuptools/tests/test_dist_info.py
new file mode 100644
index 0000000..f7e7d2b
--- /dev/null
+++ b/setuptools/tests/test_dist_info.py
@@ -0,0 +1,78 @@
+"""Test .dist-info style distributions.
+"""
+
+from __future__ import unicode_literals
+
+from setuptools.extern.six.moves import map
+
+import pytest
+
+import pkg_resources
+from .textwrap import DALS
+
+
class TestDistInfo:
    """pkg_resources discovery of .dist-info style distributions."""

    # METADATA preamble shared by both fixture distributions below.
    metadata_base = DALS("""
        Metadata-Version: 1.2
        Requires-Dist: splort (==4)
        Provides-Extra: baz
        Requires-Dist: quux (>=1.1); extra == 'baz'
        """)

    @classmethod
    def build_metadata(cls, **kwargs):
        # Render each keyword as a 'Key: value' metadata line appended
        # to the shared preamble.
        lines = (
            '{key}: {value}\n'.format(**locals())
            for key, value in kwargs.items()
        )
        return cls.metadata_base + ''.join(lines)

    @pytest.fixture
    def metadata(self, tmpdir):
        """Create two .dist-info dirs under tmpdir; return str(tmpdir)."""
        # One distribution carries its version in the directory name...
        dist_info_name = 'VersionedDistribution-2.718.dist-info'
        versioned = tmpdir / dist_info_name
        versioned.mkdir()
        filename = versioned / 'METADATA'
        content = self.build_metadata(
            Name='VersionedDistribution',
        )
        filename.write_text(content, encoding='utf-8')

        # ...and the other only inside its METADATA file.
        dist_info_name = 'UnversionedDistribution.dist-info'
        unversioned = tmpdir / dist_info_name
        unversioned.mkdir()
        filename = unversioned / 'METADATA'
        content = self.build_metadata(
            Name='UnversionedDistribution',
            Version='0.3',
        )
        filename.write_text(content, encoding='utf-8')

        return str(tmpdir)

    def test_distinfo(self, metadata):
        # find_distributions must report both dists with correct versions.
        dists = dict(
            (d.project_name, d)
            for d in pkg_resources.find_distributions(metadata)
        )

        assert len(dists) == 2, dists

        unversioned = dists['UnversionedDistribution']
        versioned = dists['VersionedDistribution']

        assert versioned.version == '2.718'  # from filename
        assert unversioned.version == '0.3'  # from METADATA

    def test_conditional_dependencies(self, metadata):
        # Requirements gated on an extra are reported only when that
        # extra is requested.
        specs = 'splort==4', 'quux>=1.1'
        requires = list(map(pkg_resources.Requirement.parse, specs))

        for d in pkg_resources.find_distributions(metadata):
            assert d.requires() == requires[:1]
            assert d.requires(extras=('baz',)) == [
                requires[0],
                pkg_resources.Requirement.parse('quux>=1.1;extra=="baz"'),
            ]
            assert d.extras == ['baz']
diff --git a/setuptools/tests/test_easy_install.py b/setuptools/tests/test_easy_install.py
new file mode 100644
index 0000000..57339c8
--- /dev/null
+++ b/setuptools/tests/test_easy_install.py
@@ -0,0 +1,760 @@
+# -*- coding: utf-8 -*-
+"""Easy install Tests
+"""
+from __future__ import absolute_import
+
+import sys
+import os
+import tempfile
+import site
+import contextlib
+import tarfile
+import logging
+import itertools
+import distutils.errors
+import io
+import zipfile
+import mock
+
+import time
+from setuptools.extern.six.moves import urllib
+
+import pytest
+
+from setuptools import sandbox
+from setuptools.sandbox import run_setup
+import setuptools.command.easy_install as ei
+from setuptools.command.easy_install import PthDistributions
+from setuptools.command import easy_install as easy_install_pkg
+from setuptools.dist import Distribution
+from pkg_resources import normalize_path, working_set
+from pkg_resources import Distribution as PRDistribution
+import setuptools.tests.server
+from setuptools.tests import fail_on_ascii
+import pkg_resources
+
+from . import contexts
+from .textwrap import DALS
+
+
class FakeDist(object):
    """Minimal stand-in for a distribution object used by the
    ScriptWriter tests: it exposes exactly one console-script entry
    point ('name' -> 'ep') and renders as the requirement 'spec'."""

    def get_entry_map(self, group):
        # Only the console_scripts group is populated.
        return {'name': 'ep'} if group == 'console_scripts' else {}

    def as_requirement(self):
        return 'spec'
+
+
# Minimal setup.py source used by the fixtures below (setup_context writes
# it verbatim; distutils_package rewrites its import line).
SETUP_PY = DALS("""
    from setuptools import setup

    setup(name='foo')
    """)
+
+
class TestEasyInstallTest:
    """Unit-level tests for the ``easy_install`` command object."""

    def test_install_site_py(self, tmpdir):
        # install_site_py should place a site.py in the install dir.
        dist = Distribution()
        cmd = ei.easy_install(dist)
        cmd.sitepy_installed = False
        cmd.install_dir = str(tmpdir)
        cmd.install_site_py()
        assert (tmpdir / 'site.py').exists()

    def test_get_script_args(self):
        # The generated console script must match the
        # EASY-INSTALL-ENTRY-SCRIPT template for FakeDist's entry point.
        header = ei.CommandSpec.best().from_environment().as_header()
        expected = header + DALS(r"""
            # EASY-INSTALL-ENTRY-SCRIPT: 'spec','console_scripts','name'
            __requires__ = 'spec'
            import re
            import sys
            from pkg_resources import load_entry_point

            if __name__ == '__main__':
                sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
                sys.exit(
                    load_entry_point('spec', 'console_scripts', 'name')()
                )
            """)
        dist = FakeDist()

        args = next(ei.ScriptWriter.get_args(dist))
        name, script = itertools.islice(args, 2)

        assert script == expected

    def test_no_find_links(self):
        # new option '--no-find-links', that blocks find-links added at
        # the project level
        dist = Distribution()
        cmd = ei.easy_install(dist)
        cmd.check_pth_processing = lambda: True
        cmd.no_find_links = True
        cmd.find_links = ['link1', 'link2']
        cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
        cmd.args = ['ok']
        cmd.ensure_finalized()
        assert cmd.package_index.scanned_urls == {}

        # let's try without it (default behavior)
        cmd = ei.easy_install(dist)
        cmd.check_pth_processing = lambda: True
        cmd.find_links = ['link1', 'link2']
        cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
        cmd.args = ['ok']
        cmd.ensure_finalized()
        keys = sorted(cmd.package_index.scanned_urls.keys())
        assert keys == ['link1', 'link2']

    def test_write_exception(self):
        """
        Test that `cant_write_to_target` is rendered as a DistutilsError.
        """
        dist = Distribution()
        cmd = ei.easy_install(dist)
        cmd.install_dir = os.getcwd()
        with pytest.raises(distutils.errors.DistutilsError):
            cmd.cant_write_to_target()

    def test_all_site_dirs(self, monkeypatch):
        """
        get_site_dirs should always return site dirs reported by
        site.getsitepackages.
        """
        path = normalize_path('/setuptools/test/site-packages')
        mock_gsp = lambda: [path]
        monkeypatch.setattr(site, 'getsitepackages', mock_gsp, raising=False)
        assert path in ei.get_site_dirs()

    def test_all_site_dirs_works_without_getsitepackages(self, monkeypatch):
        # Some platforms lack site.getsitepackages; get_site_dirs must
        # still return a usable (truthy) list.
        monkeypatch.delattr(site, 'getsitepackages', raising=False)
        assert ei.get_site_dirs()

    @pytest.fixture
    def sdist_unicode(self, tmpdir):
        # Zip-format sdist containing a non-ASCII (snowman) filename.
        files = [
            (
                'setup.py',
                DALS("""
                    import setuptools
                    setuptools.setup(
                        name="setuptools-test-unicode",
                        version="1.0",
                        packages=["mypkg"],
                        include_package_data=True,
                    )
                    """),
            ),
            (
                'mypkg/__init__.py',
                "",
            ),
            (
                u'mypkg/\u2603.txt',
                "",
            ),
        ]
        sdist_name = 'setuptools-test-unicode-1.0.zip'
        sdist = tmpdir / sdist_name
        # can't use make_sdist, because the issue only occurs
        #  with zip sdists.
        sdist_zip = zipfile.ZipFile(str(sdist), 'w')
        for filename, content in files:
            sdist_zip.writestr(filename, content)
        sdist_zip.close()
        return str(sdist)

    @fail_on_ascii
    def test_unicode_filename_in_sdist(self, sdist_unicode, tmpdir, monkeypatch):
        """
        The install command should execute correctly even if
        the package has unicode filenames.
        """
        dist = Distribution({'script_args': ['easy_install']})
        target = (tmpdir / 'target').ensure_dir()
        cmd = ei.easy_install(
            dist,
            install_dir=str(target),
            args=['x'],
        )
        monkeypatch.setitem(os.environ, 'PYTHONPATH', str(target))
        cmd.ensure_finalized()
        cmd.easy_install(sdist_unicode)

    @pytest.fixture
    def sdist_script(self, tmpdir):
        # sdist (built via make_sdist) declaring one executable script.
        files = [
            (
                'setup.py',
                DALS("""
                    import setuptools
                    setuptools.setup(
                        name="setuptools-test-script",
                        version="1.0",
                        scripts=["mypkg_script"],
                    )
                    """),
            ),
            (
                u'mypkg_script',
                DALS("""
                     #/usr/bin/python
                     print('mypkg_script')
                     """),
            ),
        ]
        sdist_name = 'setuptools-test-script-1.0.zip'
        sdist = str(tmpdir / sdist_name)
        make_sdist(sdist, files)
        return sdist

    @pytest.mark.skipif(not sys.platform.startswith('linux'),
                        reason="Test can only be run on Linux")
    def test_script_install(self, sdist_script, tmpdir, monkeypatch):
        """
        Check scripts are installed.
        """
        dist = Distribution({'script_args': ['easy_install']})
        target = (tmpdir / 'target').ensure_dir()
        cmd = ei.easy_install(
            dist,
            install_dir=str(target),
            args=['x'],
        )
        monkeypatch.setitem(os.environ, 'PYTHONPATH', str(target))
        cmd.ensure_finalized()
        cmd.easy_install(sdist_script)
        assert (target / 'mypkg_script').exists()
+
+
class TestPTHFileWriter:
    """Behavior of PthDistributions' dirty flag when adding dists."""

    def test_add_from_cwd_site_sets_dirty(self):
        """A pth file manager should set dirty when a distribution lives
        in a site dir that is also the current working directory."""
        cwd = os.getcwd()
        manager = PthDistributions('does-not_exist', [cwd])
        assert not manager.dirty
        manager.add(PRDistribution(cwd))
        assert manager.dirty

    def test_add_from_site_is_ignored(self):
        """Adding a distribution already covered by a (non-cwd) site dir
        leaves the manager clean — nothing needs writing."""
        # PthDistributions expects all locations to be normalized.
        site_dir = pkg_resources.normalize_path(
            '/test/location/does-not-have-to-exist')
        manager = PthDistributions('does-not_exist', [site_dir])
        assert not manager.dirty
        manager.add(PRDistribution(site_dir))
        assert not manager.dirty
+
+
@pytest.yield_fixture
def setup_context(tmpdir):
    # Write the minimal SETUP_PY into tmpdir and chdir there for the test.
    with (tmpdir / 'setup.py').open('w') as f:
        f.write(SETUP_PY)
    with tmpdir.as_cwd():
        yield tmpdir
+
+
@pytest.mark.usefixtures("user_override")
@pytest.mark.usefixtures("setup_context")
class TestUserInstallTest:
    """Tests around inference of the --user flag and local index use."""

    # prevent check that site-packages is writable. easy_install
    # shouldn't be writing to system site-packages during finalize
    # options, but while it does, bypass the behavior.
    prev_sp_write = mock.patch(
        'setuptools.command.easy_install.easy_install.check_site_dir',
        mock.Mock(),
    )

    # simulate setuptools installed in user site packages
    @mock.patch('setuptools.command.easy_install.__file__', site.USER_SITE)
    @mock.patch('site.ENABLE_USER_SITE', True)
    @prev_sp_write
    def test_user_install_not_implied_user_site_enabled(self):
        self.assert_not_user_site()

    @mock.patch('site.ENABLE_USER_SITE', False)
    @prev_sp_write
    def test_user_install_not_implied_user_site_disabled(self):
        self.assert_not_user_site()

    @staticmethod
    def assert_not_user_site():
        # create a finalized easy_install command
        dist = Distribution()
        dist.script_name = 'setup.py'
        cmd = ei.easy_install(dist)
        cmd.args = ['py']
        cmd.ensure_finalized()
        assert not cmd.user, 'user should not be implied'

    def test_multiproc_atexit(self):
        # Regression guard: logging during/after multiprocessing import
        # should not raise.
        pytest.importorskip('multiprocessing')

        log = logging.getLogger('test_easy_install')
        logging.basicConfig(level=logging.INFO, stream=sys.stderr)
        log.info('this should not break')

    @pytest.fixture()
    def foo_package(self, tmpdir):
        # Directory containing egg metadata for a package named 'foo'.
        egg_file = tmpdir / 'foo-1.0.egg-info'
        with egg_file.open('w') as f:
            f.write('Name: foo\n')
        return str(tmpdir)

    @pytest.yield_fixture()
    def install_target(self, tmpdir):
        # Temporary install dir exposed on both sys.path and PYTHONPATH.
        target = str(tmpdir)
        with mock.patch('sys.path', sys.path + [target]):
            python_path = os.path.pathsep.join(sys.path)
            with mock.patch.dict(os.environ, PYTHONPATH=python_path):
                yield target

    def test_local_index(self, foo_package, install_target):
        """
        The local index must be used when easy_install locates installed
        packages.
        """
        dist = Distribution()
        dist.script_name = 'setup.py'
        cmd = ei.easy_install(dist)
        cmd.install_dir = install_target
        cmd.args = ['foo']
        cmd.ensure_finalized()
        cmd.local_index.scan([foo_package])
        res = cmd.easy_install('foo')
        actual = os.path.normcase(os.path.realpath(res.location))
        expected = os.path.normcase(os.path.realpath(foo_package))
        assert actual == expected

    @contextlib.contextmanager
    def user_install_setup_context(self, *args, **kwargs):
        """
        Wrap sandbox.setup_context to patch easy_install in that context to
        appear as user-installed.
        """
        with self.orig_context(*args, **kwargs):
            import setuptools.command.easy_install as ei
            ei.__file__ = site.USER_SITE
            yield

    def patched_setup_context(self):
        # Swap sandbox.setup_context for the user-install variant above.
        self.orig_context = sandbox.setup_context

        return mock.patch(
            'setuptools.sandbox.setup_context',
            self.user_install_setup_context,
        )
+
+
@pytest.yield_fixture
def distutils_package():
    """Yield from inside a temp dir holding a plain-distutils setup.py."""
    # Same minimal script, but importing setup from distutils instead
    # of setuptools.
    setup_py = SETUP_PY.replace(
        'from setuptools import setup',
        'from distutils.core import setup',
    )
    with contexts.tempdir(cd=os.chdir):
        with open('setup.py', 'w') as f:
            f.write(setup_py)
        yield
+
+
class TestDistutilsPackage:
    def test_bdist_egg_available_on_distutils_pkg(self, distutils_package):
        # bdist_egg should be runnable even for a setup.py that imports
        # setup from distutils.core rather than setuptools.
        run_setup('setup.py', ['bdist_egg'])
+
+
class TestSetupRequires:
    """End-to-end behavior of ``setup_requires`` during easy_install."""

    def test_setup_requires_honors_fetch_params(self):
        """
        When easy_install installs a source distribution which specifies
        setup_requires, it should honor the fetch parameters (such as
        allow-hosts, index-url, and find-links).
        """
        # set up a server which will simulate an alternate package index.
        p_index = setuptools.tests.server.MockServer()
        p_index.start()
        netloc = 1
        p_index_loc = urllib.parse.urlparse(p_index.url)[netloc]
        if p_index_loc.endswith(':0'):
            # Some platforms (Jython) don't find a port to which to bind,
            #  so skip this test for them.
            return
        with contexts.quiet():
            # create an sdist that has a build-time dependency.
            with TestSetupRequires.create_sdist() as dist_file:
                with contexts.tempdir() as temp_install_dir:
                    with contexts.environment(PYTHONPATH=temp_install_dir):
                        ei_params = [
                            '--index-url', p_index.url,
                            '--allow-hosts', p_index_loc,
                            '--exclude-scripts',
                            '--install-dir', temp_install_dir,
                            dist_file,
                        ]
                        with sandbox.save_argv(['easy_install']):
                            # attempt to install the dist. It should fail because
                            #  it doesn't exist.
                            with pytest.raises(SystemExit):
                                easy_install_pkg.main(ei_params)
        # there should have been two or three requests to the server
        #  (three happens on Python 3.3a)
        assert 2 <= len(p_index.requests) <= 3
        assert p_index.requests[0].path == '/does-not-exist/'

    @staticmethod
    @contextlib.contextmanager
    def create_sdist():
        """
        Return an sdist with a setup_requires dependency (of something that
        doesn't exist)
        """
        with contexts.tempdir() as dir:
            dist_path = os.path.join(dir, 'setuptools-test-fetcher-1.0.tar.gz')
            make_sdist(dist_path, [
                ('setup.py', DALS("""
                    import setuptools
                    setuptools.setup(
                        name="setuptools-test-fetcher",
                        version="1.0",
                        setup_requires = ['does-not-exist'],
                    )
                """))])
            yield dist_path

    # Combinations of keywords to be moved from setup.py into setup.cfg
    # for the parametrized tests below.
    use_setup_cfg = (
        (),
        ('dependency_links',),
        ('setup_requires',),
        ('dependency_links', 'setup_requires'),
    )

    @pytest.mark.parametrize('use_setup_cfg', use_setup_cfg)
    def test_setup_requires_overrides_version_conflict(self, use_setup_cfg):
        """
        Regression test for distribution issue 323:
        https://bitbucket.org/tarek/distribute/issues/323

        Ensures that a distribution's setup_requires requirements can still be
        installed and used locally even if a conflicting version of that
        requirement is already on the path.
        """

        fake_dist = PRDistribution('does-not-matter', project_name='foobar',
                                   version='0.0')
        working_set.add(fake_dist)

        with contexts.save_pkg_resources_state():
            with contexts.tempdir() as temp_dir:
                test_pkg = create_setup_requires_package(temp_dir, use_setup_cfg=use_setup_cfg)
                test_setup_py = os.path.join(test_pkg, 'setup.py')
                with contexts.quiet() as (stdout, stderr):
                    # Don't even need to install the package, just
                    # running the setup.py at all is sufficient
                    run_setup(test_setup_py, ['--name'])

                lines = stdout.readlines()
                assert len(lines) > 0
                assert lines[-1].strip() == 'test_pkg'

    @pytest.mark.parametrize('use_setup_cfg', use_setup_cfg)
    def test_setup_requires_override_nspkg(self, use_setup_cfg):
        """
        Like ``test_setup_requires_overrides_version_conflict`` but where the
        ``setup_requires`` package is part of a namespace package that has
        *already* been imported.
        """

        with contexts.save_pkg_resources_state():
            with contexts.tempdir() as temp_dir:
                foobar_1_archive = os.path.join(temp_dir, 'foo.bar-0.1.tar.gz')
                make_nspkg_sdist(foobar_1_archive, 'foo.bar', '0.1')
                # Now actually go ahead an extract to the temp dir and add the
                # extracted path to sys.path so foo.bar v0.1 is importable
                foobar_1_dir = os.path.join(temp_dir, 'foo.bar-0.1')
                os.mkdir(foobar_1_dir)
                with tarfile.open(foobar_1_archive) as tf:
                    tf.extractall(foobar_1_dir)
                sys.path.insert(1, foobar_1_dir)

                dist = PRDistribution(foobar_1_dir, project_name='foo.bar',
                                      version='0.1')
                working_set.add(dist)

                template = DALS("""\
                    import foo  # Even with foo imported first the
                                # setup_requires package should override
                    import setuptools
                    setuptools.setup(**%r)

                    if not (hasattr(foo, '__path__') and
                            len(foo.__path__) == 2):
                        print('FAIL')

                    if 'foo.bar-0.2' not in foo.__path__[0]:
                        print('FAIL')
                """)

                test_pkg = create_setup_requires_package(
                    temp_dir, 'foo.bar', '0.2', make_nspkg_sdist, template,
                    use_setup_cfg=use_setup_cfg)

                test_setup_py = os.path.join(test_pkg, 'setup.py')

                with contexts.quiet() as (stdout, stderr):
                    try:
                        # Don't even need to install the package, just
                        # running the setup.py at all is sufficient
                        run_setup(test_setup_py, ['--name'])
                    except pkg_resources.VersionConflict:
                        # Fix: this class does not subclass
                        # unittest.TestCase, so self.fail() would raise
                        # AttributeError; use pytest.fail instead.
                        pytest.fail('Installing setup.py requirements '
                                    'caused a VersionConflict')

                assert 'FAIL' not in stdout.getvalue()
                lines = stdout.readlines()
                assert len(lines) > 0
                assert lines[-1].strip() == 'test_pkg'

    @pytest.mark.parametrize('use_setup_cfg', use_setup_cfg)
    def test_setup_requires_with_attr_version(self, use_setup_cfg):
        # The setup_requires dependency must be importable in time for an
        # 'attr:' version specifier in setup.cfg to resolve against it.
        def make_dependency_sdist(dist_path, distname, version):
            make_sdist(dist_path, [
                ('setup.py',
                 DALS("""
                      import setuptools
                      setuptools.setup(
                          name={name!r},
                          version={version!r},
                          py_modules=[{name!r}],
                      )
                      """.format(name=distname, version=version))),
                (distname + '.py',
                 DALS("""
                      version = 42
                      """
                     ))])
        with contexts.save_pkg_resources_state():
            with contexts.tempdir() as temp_dir:
                test_pkg = create_setup_requires_package(
                    temp_dir, setup_attrs=dict(version='attr: foobar.version'),
                    make_package=make_dependency_sdist,
                    use_setup_cfg=use_setup_cfg+('version',),
                )
                test_setup_py = os.path.join(test_pkg, 'setup.py')
                with contexts.quiet() as (stdout, stderr):
                    run_setup(test_setup_py, ['--version'])
                lines = stdout.readlines()
                assert len(lines) > 0
                assert lines[-1].strip() == '42'
+
+
def make_trivial_sdist(dist_path, distname, version):
    """
    Create a simple sdist tarball at dist_path, containing just a simple
    setup.py.
    """
    setup_py = DALS("""\
        import setuptools
        setuptools.setup(
            name=%r,
            version=%r
        )
    """ % (distname, version))
    make_sdist(dist_path, [('setup.py', setup_py)])
+
+
def make_nspkg_sdist(dist_path, distname, version):
    """
    Make an sdist tarball with distname and version which also contains one
    package with the same name as distname.  The top-level package is
    designated a namespace package.
    """
    parts = distname.split('.')
    nspackage = parts[0]

    # All dotted prefixes of distname: 'a', 'a.b', 'a.b.c', ...
    packages = ['.'.join(parts[:idx]) for idx in range(1, len(parts) + 1)]

    setup_py = DALS("""\
        import setuptools
        setuptools.setup(
            name=%r,
            version=%r,
            packages=%r,
            namespace_packages=[%r]
        )
    """ % (distname, version, packages, nspackage))

    # The top-level __init__ declares the namespace; deeper packages get
    # empty __init__ modules.
    files = [('setup.py', setup_py)]
    files.append((
        os.path.join(nspackage, '__init__.py'),
        "__import__('pkg_resources').declare_namespace(__name__)",
    ))
    for subpackage in packages[1:]:
        init_path = os.path.join(*(subpackage.split('.') + ['__init__.py']))
        files.append((init_path, ''))

    make_sdist(dist_path, files)
+
+
def make_sdist(dist_path, files):
    """
    Create a simple sdist tarball at dist_path, containing the files
    listed in ``files`` as ``(filename, content)`` tuples.
    """
    dist = tarfile.open(dist_path, 'w:gz')
    try:
        for name, text in files:
            payload = text.encode('utf-8')
            member = tarfile.TarInfo(name=name)
            member.size = len(payload)
            member.mtime = int(time.time())
            dist.addfile(member, fileobj=io.BytesIO(payload))
    finally:
        dist.close()
+
+
def create_setup_requires_package(path, distname='foobar', version='0.1',
                                  make_package=None,
                                  setup_py_template=None, setup_attrs=None,
                                  use_setup_cfg=()):
    """Creates a source tree under path for a trivial test package that has a
    single requirement in setup_requires--a tarball for that requirement is
    also created and added to the dependency_links argument.

    ``distname`` and ``version`` refer to the name/version of the package that
    the test package requires via ``setup_requires``.  The name of the test
    package itself is just 'test_pkg'.

    ``make_package`` defaults to ``make_trivial_sdist``; ``setup_attrs``
    (extra attributes merged into the generated setup() call) previously
    defaulted to a shared mutable ``{}`` — both now default to None and are
    resolved lazily, which is backward compatible for all callers.

    Returns the path of the created 'test_pkg' directory.
    """
    if make_package is None:
        make_package = make_trivial_sdist

    test_setup_attrs = {
        'name': 'test_pkg', 'version': '0.0',
        'setup_requires': ['%s==%s' % (distname, version)],
        'dependency_links': [os.path.abspath(path)]
    }
    if setup_attrs:
        test_setup_attrs.update(setup_attrs)

    test_pkg = os.path.join(path, 'test_pkg')
    os.mkdir(test_pkg)

    if use_setup_cfg:
        # Move the selected attributes out of setup.py and into setup.cfg,
        # splitting them between the [metadata] and [options] sections.
        test_setup_cfg = os.path.join(test_pkg, 'setup.cfg')
        options = []
        metadata = []
        for name in use_setup_cfg:
            value = test_setup_attrs.pop(name)
            if name in 'name version'.split():
                section = metadata
            else:
                section = options
            if isinstance(value, (tuple, list)):
                value = ';'.join(value)
            section.append('%s: %s' % (name, value))
        with open(test_setup_cfg, 'w') as f:
            f.write(DALS(
                """
                [metadata]
                {metadata}
                [options]
                {options}
                """
            ).format(
                options='\n'.join(options),
                metadata='\n'.join(metadata),
            ))

    test_setup_py = os.path.join(test_pkg, 'setup.py')

    if setup_py_template is None:
        setup_py_template = DALS("""\
            import setuptools
            setuptools.setup(**%r)
        """)

    with open(test_setup_py, 'w') as f:
        f.write(setup_py_template % test_setup_attrs)

    # Build the sdist for the setup_requires dependency next to the tree.
    foobar_path = os.path.join(path, '%s-%s.tar.gz' % (distname, version))
    make_package(foobar_path, distname, version)

    return test_pkg
+
+
@pytest.mark.skipif(
    sys.platform.startswith('java') and ei.is_sh(sys.executable),
    reason="Test cannot run under java when executable is sh"
)
class TestScriptHeader:
    """Shebang generation by ScriptWriter.get_script_header."""

    non_ascii_exe = '/Users/José/bin/python'
    exe_with_spaces = r'C:\Program Files\Python33\python.exe'

    def test_get_script_header(self):
        # The header should point at the current, normalized executable.
        expected = '#!%s\n' % ei.nt_quote_arg(os.path.normpath(sys.executable))
        actual = ei.ScriptWriter.get_script_header('#!/usr/local/bin/python')
        assert actual == expected

    def test_get_script_header_args(self):
        # Interpreter options (-x) in the original shebang are preserved.
        expected = '#!%s -x\n' % ei.nt_quote_arg(os.path.normpath
            (sys.executable))
        actual = ei.ScriptWriter.get_script_header('#!/usr/bin/python -x')
        assert actual == expected

    def test_get_script_header_non_ascii_exe(self):
        # NOTE(review): '-x' is expected even though the input header has
        # no options — presumably added for non-ASCII executables; confirm
        # against ScriptWriter.get_script_header's implementation.
        actual = ei.ScriptWriter.get_script_header('#!/usr/bin/python',
            executable=self.non_ascii_exe)
        expected = '#!%s -x\n' % self.non_ascii_exe
        assert actual == expected

    def test_get_script_header_exe_with_spaces(self):
        # A pre-quoted executable containing spaces passes through intact.
        actual = ei.ScriptWriter.get_script_header('#!/usr/bin/python',
            executable='"' + self.exe_with_spaces + '"')
        expected = '#!"%s"\n' % self.exe_with_spaces
        assert actual == expected
+
+
class TestCommandSpec:
    """CommandSpec construction and header rendering."""

    def test_custom_launch_command(self):
        """
        Show how a custom CommandSpec could be used to specify a #! executable
        which takes parameters.
        """
        cmd = ei.CommandSpec(['/usr/bin/env', 'python3'])
        assert cmd.as_header() == '#!/usr/bin/env python3\n'

    def test_from_param_for_CommandSpec_is_passthrough(self):
        """
        from_param should return an instance of a CommandSpec
        """
        cmd = ei.CommandSpec(['python'])
        cmd_new = ei.CommandSpec.from_param(cmd)
        assert cmd is cmd_new

    @mock.patch('sys.executable', TestScriptHeader.exe_with_spaces)
    @mock.patch.dict(os.environ)
    def test_from_environment_with_spaces_in_executable(self):
        # A space-containing sys.executable must be quoted in the header.
        os.environ.pop('__PYVENV_LAUNCHER__', None)
        cmd = ei.CommandSpec.from_environment()
        assert len(cmd) == 1
        assert cmd.as_header().startswith('#!"')

    def test_from_simple_string_uses_shlex(self):
        """
        In order to support `executable = /usr/bin/env my-python`, make sure
        from_param invokes shlex on that input.
        """
        cmd = ei.CommandSpec.from_param('/usr/bin/env my-python')
        assert len(cmd) == 2
        assert '"' not in cmd.as_header()
+
+
class TestWindowsScriptWriter:
    def test_header(self):
        # The Windows script header must be a '#!...\n' shebang whose
        # executable is not wrapped in escaped quotes.
        hdr = ei.WindowsScriptWriter.get_script_header('')
        assert hdr.startswith('#!')
        assert hdr.endswith('\n')
        # NOTE: lstrip/rstrip strip character *sets*, not prefixes; that
        # is safe here because the header begins with '#!' and ends with
        # a newline (asserted above).
        hdr = hdr.lstrip('#!')
        hdr = hdr.rstrip('\n')
        # header should not start with an escaped quote
        assert not hdr.startswith('\\"')
diff --git a/setuptools/tests/test_egg_info.py b/setuptools/tests/test_egg_info.py
new file mode 100644
index 0000000..2a070de
--- /dev/null
+++ b/setuptools/tests/test_egg_info.py
@@ -0,0 +1,598 @@
+import sys
+import ast
+import os
+import glob
+import re
+import stat
+
+from setuptools.command.egg_info import egg_info, manifest_maker
+from setuptools.dist import Distribution
+from setuptools.extern.six.moves import map
+
+import pytest
+
+from . import environment
+from .files import build_files
+from .textwrap import DALS
+from . import contexts
+
+
class Environment(str):
    """
    A string subclass naming a test environment's root directory.

    Instances behave exactly like the underlying path string, but extra
    attributes (such as ``paths``) may be attached to them by fixtures.
    """
+
+
class TestEggInfo(object):
    """
    Tests for the ``egg_info`` command: the files it creates, the
    requirements it records, and the PKG-INFO metadata it writes.
    """

    setup_script = DALS("""
        from setuptools import setup

        setup(
            name='foo',
            py_modules=['hello'],
            entry_points={'console_scripts': ['hi = hello.run']},
            zip_safe=False,
        )
        """)

    def _create_project(self):
        # Lay out a minimal installable project in the current directory.
        build_files({
            'setup.py': self.setup_script,
            'hello.py': DALS("""
                def run():
                    print('hello')
                """)
        })

    @staticmethod
    def _environ_with_home(env):
        """
        Return a copy of os.environ with HOME pointed into the test env.

        Note: the previous inline idiom ``os.environ.copy().update(...)``
        evaluated to None because dict.update returns None, so HOME was
        never actually overridden for the subprocess.
        """
        environ = os.environ.copy()
        environ['HOME'] = env.paths['home']
        return environ

    @pytest.yield_fixture
    def env(self):
        # Isolated install layout plus a .pydistutils.cfg redirecting
        # egg-base, so egg_info output lands inside the temp dir.
        with contexts.tempdir(prefix='setuptools-test.') as env_dir:
            env = Environment(env_dir)
            os.chmod(env_dir, stat.S_IRWXU)
            subs = 'home', 'lib', 'scripts', 'data', 'egg-base'
            env.paths = dict(
                (dirname, os.path.join(env_dir, dirname))
                for dirname in subs
            )
            list(map(os.mkdir, env.paths.values()))
            build_files({
                env.paths['home']: {
                    '.pydistutils.cfg': DALS("""
                    [egg_info]
                    egg-base = %(egg-base)s
                    """ % env.paths)
                }
            })
            yield env

    def test_egg_info_save_version_info_setup_empty(self, tmpdir_cwd, env):
        """
        When the egg_info section is empty or not present, running
        save_version_info should add the settings to the setup.cfg
        in a deterministic order, consistent with the ordering found
        on Python 2.7 with PYTHONHASHSEED=0.
        """
        setup_cfg = os.path.join(env.paths['home'], 'setup.cfg')
        dist = Distribution()
        ei = egg_info(dist)
        ei.initialize_options()
        ei.save_version_info(setup_cfg)

        with open(setup_cfg, 'r') as f:
            content = f.read()

        assert '[egg_info]' in content
        assert 'tag_build =' in content
        assert 'tag_date = 0' in content

        expected_order = 'tag_build', 'tag_date',

        self._validate_content_order(content, expected_order)

    @staticmethod
    def _validate_content_order(content, expected):
        """
        Assert that the strings in expected appear in content
        in order.
        """
        pattern = '.*'.join(expected)
        flags = re.MULTILINE | re.DOTALL
        assert re.search(pattern, content, flags)

    def test_egg_info_save_version_info_setup_defaults(self, tmpdir_cwd, env):
        """
        When running save_version_info on an existing setup.cfg
        with the 'default' values present from a previous run,
        the file should remain unchanged.
        """
        setup_cfg = os.path.join(env.paths['home'], 'setup.cfg')
        build_files({
            setup_cfg: DALS("""
            [egg_info]
            tag_build =
            tag_date = 0
            """),
        })
        dist = Distribution()
        ei = egg_info(dist)
        ei.initialize_options()
        ei.save_version_info(setup_cfg)

        with open(setup_cfg, 'r') as f:
            content = f.read()

        assert '[egg_info]' in content
        assert 'tag_build =' in content
        assert 'tag_date = 0' in content

        expected_order = 'tag_build', 'tag_date',

        self._validate_content_order(content, expected_order)

    def test_egg_base_installed_egg_info(self, tmpdir_cwd, env):
        # egg-base from .pydistutils.cfg should direct the egg-info files.
        self._create_project()

        self._run_install_command(tmpdir_cwd, env)
        actual = self._find_egg_info_files(env.paths['lib'])

        expected = [
            'PKG-INFO',
            'SOURCES.txt',
            'dependency_links.txt',
            'entry_points.txt',
            'not-zip-safe',
            'top_level.txt',
        ]
        assert sorted(actual) == expected

    def test_manifest_template_is_read(self, tmpdir_cwd, env):
        # Files matched by MANIFEST.in must appear in SOURCES.txt.
        self._create_project()
        build_files({
            'MANIFEST.in': DALS("""
                recursive-include docs *.rst
            """),
            'docs': {
                'usage.rst': "Run 'hi'",
            }
        })
        self._run_install_command(tmpdir_cwd, env)
        egg_info_dir = self._find_egg_info_files(env.paths['lib']).base
        sources_txt = os.path.join(egg_info_dir, 'SOURCES.txt')
        with open(sources_txt) as f:
            assert 'docs/usage.rst' in f.read().split('\n')

    def _setup_script_with_requires(self, requires, use_setup_cfg=False):
        # Build a project whose requirements live either in setup.py or
        # in setup.cfg, depending on use_setup_cfg.
        setup_script = DALS(
            '''
            from setuptools import setup

            setup(name='foo', zip_safe=False, %s)
            '''
        ) % ('' if use_setup_cfg else requires)
        setup_config = requires if use_setup_cfg else ''
        build_files({'setup.py': setup_script,
                     'setup.cfg': setup_config})

    # A marker guaranteed NOT to match the running interpreter, so the
    # requirement is recorded but never installed.
    mismatch_marker = "python_version<'{this_ver}'".format(
        this_ver=sys.version_info[0],
    )
    # Alternate equivalent syntax.
    mismatch_marker_alternate = 'python_version < "{this_ver}"'.format(
        this_ver=sys.version_info[0],
    )
    invalid_marker = "<=>++"

    class RequiresTestHelper(object):
        """Expand declarative requirement scenarios into parametrize args."""

        @staticmethod
        def parametrize(*test_list, **format_dict):
            idlist = []
            argvalues = []
            for test in test_list:
                # Each scenario is four blank-line-separated sections:
                # name (+ optional kwargs), setup.py form, setup.cfg form,
                # and the expected requires.txt contents.
                test_params = test.lstrip().split('\n\n', 3)
                name_kwargs = test_params.pop(0).split('\n')
                if len(name_kwargs) > 1:
                    val = name_kwargs[1].strip()
                    install_cmd_kwargs = ast.literal_eval(val)
                else:
                    install_cmd_kwargs = {}
                name = name_kwargs[0].strip()
                setup_py_requires, setup_cfg_requires, expected_requires = (
                    DALS(a).format(**format_dict) for a in test_params
                )
                for id_, requires, use_cfg in (
                    (name, setup_py_requires, False),
                    (name + '_in_setup_cfg', setup_cfg_requires, True),
                ):
                    idlist.append(id_)
                    marks = ()
                    if requires.startswith('@xfail\n'):
                        requires = requires[7:]
                        marks = pytest.mark.xfail
                    argvalues.append(pytest.param(requires, use_cfg,
                                                  expected_requires,
                                                  install_cmd_kwargs,
                                                  marks=marks))
            return pytest.mark.parametrize(
                'requires,use_setup_cfg,'
                'expected_requires,install_cmd_kwargs',
                argvalues, ids=idlist,
            )

    @RequiresTestHelper.parametrize(
        # Format of a test:
        #
        # id
        # install_cmd_kwargs [optional]
        #
        # requires block (when used in setup.py)
        #
        # requires block (when used in setup.cfg)
        #
        # expected contents of requires.txt

        '''
        install_requires_deterministic

        install_requires=["fake-factory==0.5.2", "pytz"]

        [options]
        install_requires =
            fake-factory==0.5.2
            pytz

        fake-factory==0.5.2
        pytz
        ''',

        '''
        install_requires_ordered

        install_requires=["fake-factory>=1.12.3,!=2.0"]

        [options]
        install_requires =
            fake-factory>=1.12.3,!=2.0

        fake-factory!=2.0,>=1.12.3
        ''',

        '''
        install_requires_with_marker

        install_requires=["barbazquux;{mismatch_marker}"],

        [options]
        install_requires =
            barbazquux; {mismatch_marker}

        [:{mismatch_marker_alternate}]
        barbazquux
        ''',

        '''
        install_requires_with_extra
        {'cmd': ['egg_info']}

        install_requires=["barbazquux [test]"],

        [options]
        install_requires =
            barbazquux [test]

        barbazquux[test]
        ''',

        '''
        install_requires_with_extra_and_marker

        install_requires=["barbazquux [test]; {mismatch_marker}"],

        [options]
        install_requires =
            barbazquux [test]; {mismatch_marker}

        [:{mismatch_marker_alternate}]
        barbazquux[test]
        ''',

        '''
        setup_requires_with_markers

        setup_requires=["barbazquux;{mismatch_marker}"],

        [options]
        setup_requires =
            barbazquux; {mismatch_marker}

        ''',

        '''
        tests_require_with_markers
        {'cmd': ['test'], 'output': "Ran 0 tests in"}

        tests_require=["barbazquux;{mismatch_marker}"],

        [options]
        tests_require =
            barbazquux; {mismatch_marker}

        ''',

        '''
        extras_require_with_extra
        {'cmd': ['egg_info']}

        extras_require={{"extra": ["barbazquux [test]"]}},

        [options.extras_require]
        extra = barbazquux [test]

        [extra]
        barbazquux[test]
        ''',

        '''
        extras_require_with_extra_and_marker_in_req

        extras_require={{"extra": ["barbazquux [test]; {mismatch_marker}"]}},

        [options.extras_require]
        extra =
            barbazquux [test]; {mismatch_marker}

        [extra]

        [extra:{mismatch_marker_alternate}]
        barbazquux[test]
        ''',

        # FIXME: ConfigParser does not allow : in key names!
        '''
        extras_require_with_marker

        extras_require={{":{mismatch_marker}": ["barbazquux"]}},

        @xfail
        [options.extras_require]
        :{mismatch_marker} = barbazquux

        [:{mismatch_marker}]
        barbazquux
        ''',

        '''
        extras_require_with_marker_in_req

        extras_require={{"extra": ["barbazquux; {mismatch_marker}"]}},

        [options.extras_require]
        extra =
            barbazquux; {mismatch_marker}

        [extra]

        [extra:{mismatch_marker_alternate}]
        barbazquux
        ''',

        '''
        extras_require_with_empty_section

        extras_require={{"empty": []}},

        [options.extras_require]
        empty =

        [empty]
        ''',
        # Format arguments.
        invalid_marker=invalid_marker,
        mismatch_marker=mismatch_marker,
        mismatch_marker_alternate=mismatch_marker_alternate,
    )
    def test_requires(
            self, tmpdir_cwd, env, requires, use_setup_cfg,
            expected_requires, install_cmd_kwargs):
        self._setup_script_with_requires(requires, use_setup_cfg)
        self._run_install_command(tmpdir_cwd, env, **install_cmd_kwargs)
        egg_info_dir = os.path.join('.', 'foo.egg-info')
        requires_txt = os.path.join(egg_info_dir, 'requires.txt')
        if os.path.exists(requires_txt):
            with open(requires_txt) as fp:
                install_requires = fp.read()
        else:
            install_requires = ''
        assert install_requires.lstrip() == expected_requires
        # Mismatched markers must never cause the requirement to install.
        assert glob.glob(os.path.join(env.paths['lib'], 'barbazquux*')) == []

    def test_install_requires_unordered_disallowed(self, tmpdir_cwd, env):
        """
        Packages that pass unordered install_requires sequences
        should be rejected as they produce non-deterministic
        builds. See #458.
        """
        req = 'install_requires={"fake-factory==0.5.2", "pytz"}'
        self._setup_script_with_requires(req)
        with pytest.raises(AssertionError):
            self._run_install_command(tmpdir_cwd, env)

    def test_extras_require_with_invalid_marker(self, tmpdir_cwd, env):
        # An unparseable marker in an extras key aborts the install.
        tmpl = 'extras_require={{":{marker}": ["barbazquux"]}},'
        req = tmpl.format(marker=self.invalid_marker)
        self._setup_script_with_requires(req)
        with pytest.raises(AssertionError):
            self._run_install_command(tmpdir_cwd, env)
        assert glob.glob(os.path.join(env.paths['lib'], 'barbazquux*')) == []

    def test_extras_require_with_invalid_marker_in_req(self, tmpdir_cwd, env):
        # An unparseable marker inside a requirement aborts the install.
        tmpl = 'extras_require={{"extra": ["barbazquux; {marker}"]}},'
        req = tmpl.format(marker=self.invalid_marker)
        self._setup_script_with_requires(req)
        with pytest.raises(AssertionError):
            self._run_install_command(tmpdir_cwd, env)
        assert glob.glob(os.path.join(env.paths['lib'], 'barbazquux*')) == []

    def test_provides_extra(self, tmpdir_cwd, env):
        self._setup_script_with_requires(
            'extras_require={"foobar": ["barbazquux"]},')
        environ = self._environ_with_home(env)
        code, data = environment.run_setup_py(
            cmd=['egg_info'],
            pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
            data_stream=1,
            env=environ,
        )
        egg_info_dir = os.path.join('.', 'foo.egg-info')
        with open(os.path.join(egg_info_dir, 'PKG-INFO')) as pkginfo_file:
            pkg_info_lines = pkginfo_file.read().split('\n')
        assert 'Provides-Extra: foobar' in pkg_info_lines
        assert 'Metadata-Version: 2.1' in pkg_info_lines

    def test_doesnt_provides_extra(self, tmpdir_cwd, env):
        self._setup_script_with_requires(
            '''install_requires=["spam ; python_version<'3.3'"]''')
        environ = self._environ_with_home(env)
        environment.run_setup_py(
            cmd=['egg_info'],
            pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
            data_stream=1,
            env=environ,
        )
        egg_info_dir = os.path.join('.', 'foo.egg-info')
        with open(os.path.join(egg_info_dir, 'PKG-INFO')) as pkginfo_file:
            pkg_info_text = pkginfo_file.read()
        assert 'Provides-Extra:' not in pkg_info_text

    def test_long_description_content_type(self, tmpdir_cwd, env):
        # Test that specifying a `long_description_content_type` keyword arg to
        # the `setup` function results in writing a `Description-Content-Type`
        # line to the `PKG-INFO` file in the `<distribution>.egg-info`
        # directory.
        # `Description-Content-Type` is described at
        # https://github.com/pypa/python-packaging-user-guide/pull/258

        self._setup_script_with_requires(
            """long_description_content_type='text/markdown',""")
        environ = self._environ_with_home(env)
        code, data = environment.run_setup_py(
            cmd=['egg_info'],
            pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
            data_stream=1,
            env=environ,
        )
        egg_info_dir = os.path.join('.', 'foo.egg-info')
        with open(os.path.join(egg_info_dir, 'PKG-INFO')) as pkginfo_file:
            pkg_info_lines = pkginfo_file.read().split('\n')
        expected_line = 'Description-Content-Type: text/markdown'
        assert expected_line in pkg_info_lines
        assert 'Metadata-Version: 2.1' in pkg_info_lines

    def test_project_urls(self, tmpdir_cwd, env):
        # Test that specifying a `project_urls` dict to the `setup`
        # function results in writing multiple `Project-URL` lines to
        # the `PKG-INFO` file in the `<distribution>.egg-info`
        # directory.
        # `Project-URL` is described at https://packaging.python.org
        #     /specifications/core-metadata/#project-url-multiple-use

        self._setup_script_with_requires(
            """project_urls={
                'Link One': 'https://example.com/one/',
                'Link Two': 'https://example.com/two/',
                },""")
        environ = self._environ_with_home(env)
        code, data = environment.run_setup_py(
            cmd=['egg_info'],
            pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
            data_stream=1,
            env=environ,
        )
        egg_info_dir = os.path.join('.', 'foo.egg-info')
        with open(os.path.join(egg_info_dir, 'PKG-INFO')) as pkginfo_file:
            pkg_info_lines = pkginfo_file.read().split('\n')
        expected_line = 'Project-URL: Link One, https://example.com/one/'
        assert expected_line in pkg_info_lines
        expected_line = 'Project-URL: Link Two, https://example.com/two/'
        assert expected_line in pkg_info_lines

    def test_python_requires_egg_info(self, tmpdir_cwd, env):
        self._setup_script_with_requires(
            """python_requires='>=2.7.12',""")
        environ = self._environ_with_home(env)
        code, data = environment.run_setup_py(
            cmd=['egg_info'],
            pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
            data_stream=1,
            env=environ,
        )
        egg_info_dir = os.path.join('.', 'foo.egg-info')
        with open(os.path.join(egg_info_dir, 'PKG-INFO')) as pkginfo_file:
            pkg_info_lines = pkginfo_file.read().split('\n')
        assert 'Requires-Python: >=2.7.12' in pkg_info_lines
        assert 'Metadata-Version: 1.2' in pkg_info_lines

    def test_python_requires_install(self, tmpdir_cwd, env):
        self._setup_script_with_requires(
            """python_requires='>=1.2.3',""")
        self._run_install_command(tmpdir_cwd, env)
        egg_info_dir = self._find_egg_info_files(env.paths['lib']).base
        pkginfo = os.path.join(egg_info_dir, 'PKG-INFO')
        with open(pkginfo) as f:
            assert 'Requires-Python: >=1.2.3' in f.read().split('\n')

    def test_manifest_maker_warning_suppression(self):
        fixtures = [
            "standard file not found: should have one of foo.py, bar.py",
            "standard file 'setup.py' not found"
        ]

        for msg in fixtures:
            assert manifest_maker._should_suppress_warning(msg)

    def _run_install_command(self, tmpdir_cwd, env, cmd=None, output=None):
        """
        Run ``setup.py`` in a subprocess confined to the test env.

        Raises AssertionError (carrying the subprocess output) on a
        nonzero exit; optionally asserts `output` appears in the data.
        """
        environ = self._environ_with_home(env)
        if cmd is None:
            cmd = [
                'install',
                '--home', env.paths['home'],
                '--install-lib', env.paths['lib'],
                '--install-scripts', env.paths['scripts'],
                '--install-data', env.paths['data'],
            ]
        code, data = environment.run_setup_py(
            cmd=cmd,
            pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
            data_stream=1,
            env=environ,
        )
        if code:
            raise AssertionError(data)
        if output:
            assert output in data

    def _find_egg_info_files(self, root):
        # Return the file list of the single EGG-INFO dir under `root`,
        # annotated with its directory path via the `base` attribute.
        class DirList(list):
            def __init__(self, files, base):
                super(DirList, self).__init__(files)
                self.base = base

        results = (
            DirList(filenames, dirpath)
            for dirpath, dirnames, filenames in os.walk(root)
            if os.path.basename(dirpath) == 'EGG-INFO'
        )
        # expect exactly one result
        result, = results
        return result
diff --git a/setuptools/tests/test_find_packages.py b/setuptools/tests/test_find_packages.py
new file mode 100644
index 0000000..a6023de
--- /dev/null
+++ b/setuptools/tests/test_find_packages.py
@@ -0,0 +1,182 @@
+"""Tests for setuptools.find_packages()."""
+import os
+import sys
+import shutil
+import tempfile
+import platform
+
+import pytest
+
+import setuptools
+from setuptools import find_packages
+
+find_420_packages = setuptools.PEP420PackageFinder.find
+
+# modeled after CPython's test.support.can_symlink
+
+
def can_symlink():
    """
    Return True if the current user/platform can create symlinks.

    The answer is computed once and memoized by rebinding this
    module-level name to a constant-returning lambda (modeled after
    CPython's test.support.can_symlink).
    """
    # Probe inside a private temporary directory instead of using the
    # deprecated, race-prone tempfile.mktemp().
    probe_dir = tempfile.mkdtemp()
    try:
        target = os.path.join(probe_dir, 'target')
        symlink_path = os.path.join(probe_dir, 'can_symlink')
        try:
            os.symlink(target, symlink_path)
            can = True
        except (OSError, NotImplementedError, AttributeError):
            # OSError: insufficient privilege (e.g. non-admin Windows);
            # NotImplementedError/AttributeError: platform lacks os.symlink.
            can = False
    finally:
        shutil.rmtree(probe_dir, ignore_errors=True)
    globals().update(can_symlink=lambda: can)
    return can
+
+
def has_symlink():
    """
    Return True when symlink-based tests may run: the platform supports
    symlinks and its directory-symlink handling is not known to be broken.
    """
    # Windows symlink directory detection is broken on Python 3.2.
    broken_platform = (
        platform.system() == 'Windows' and sys.version_info[:2] == (3, 2)
    )
    return can_symlink() and not broken_platform
+
+
class TestFindPackages:
    """Exercise find_packages() against a scratch project tree."""

    def setup_method(self, method):
        self.dist_dir = tempfile.mkdtemp()
        self._make_pkg_structure()

    def teardown_method(self, method):
        shutil.rmtree(self.dist_dir)

    def _make_pkg_structure(self):
        """Make basic package structure.

        dist/
            docs/
                conf.py
            pkg/
                __pycache__/
                nspkg/
                    mod.py
                subpkg/
                    assets/
                        asset
                    __init__.py
            setup.py

        """
        root = self.dist_dir
        self.docs_dir = self._mkdir('docs', root)
        self._touch('conf.py', self.docs_dir)
        self.pkg_dir = self._mkdir('pkg', root)
        self._mkdir('__pycache__', self.pkg_dir)
        self.ns_pkg_dir = self._mkdir('nspkg', self.pkg_dir)
        self._touch('mod.py', self.ns_pkg_dir)
        self.sub_pkg_dir = self._mkdir('subpkg', self.pkg_dir)
        self.asset_dir = self._mkdir('assets', self.sub_pkg_dir)
        self._touch('asset', self.asset_dir)
        self._touch('__init__.py', self.sub_pkg_dir)
        self._touch('setup.py', root)

    def _mkdir(self, name, parent_dir=None):
        # Create a directory (optionally under parent_dir); return its path.
        path = os.path.join(parent_dir, name) if parent_dir else name
        os.mkdir(path)
        return path

    def _touch(self, name, dir_=None):
        # Create an empty file (optionally under dir_); return its path.
        path = os.path.join(dir_, name) if dir_ else name
        with open(path, 'w'):
            pass
        return path

    def test_regular_package(self):
        self._touch('__init__.py', self.pkg_dir)
        found = find_packages(self.dist_dir)
        assert found == ['pkg', 'pkg.subpkg']

    def test_exclude(self):
        self._touch('__init__.py', self.pkg_dir)
        found = find_packages(self.dist_dir, exclude=('pkg.*',))
        assert found == ['pkg']

    def test_exclude_recursive(self):
        """
        Excluding a parent package should not exclude child packages as well.
        """
        self._touch('__init__.py', self.pkg_dir)
        self._touch('__init__.py', self.sub_pkg_dir)
        found = find_packages(self.dist_dir, exclude=('pkg',))
        assert found == ['pkg.subpkg']

    def test_include_excludes_other(self):
        """
        If include is specified, other packages should be excluded.
        """
        self._touch('__init__.py', self.pkg_dir)
        alt_dir = self._mkdir('other_pkg', self.dist_dir)
        self._touch('__init__.py', alt_dir)
        found = find_packages(self.dist_dir, include=['other_pkg'])
        assert found == ['other_pkg']

    def test_dir_with_dot_is_skipped(self):
        shutil.rmtree(os.path.join(self.dist_dir, 'pkg/subpkg/assets'))
        data_dir = self._mkdir('some.data', self.pkg_dir)
        self._touch('__init__.py', data_dir)
        self._touch('file.dat', data_dir)
        assert 'pkg.some.data' not in find_packages(self.dist_dir)

    def test_dir_with_packages_in_subdir_is_excluded(self):
        """
        Ensure that a package in a non-package such as build/pkg/__init__.py
        is excluded.
        """
        build_dir = self._mkdir('build', self.dist_dir)
        build_pkg_dir = self._mkdir('pkg', build_dir)
        self._touch('__init__.py', build_pkg_dir)
        assert 'build.pkg' not in find_packages(self.dist_dir)

    @pytest.mark.skipif(not has_symlink(), reason='Symlink support required')
    def test_symlinked_packages_are_included(self):
        """
        A symbolically-linked directory should be treated like any other
        directory when matched as a package.

        Create a link from lpkg -> pkg.
        """
        self._touch('__init__.py', self.pkg_dir)
        linked_pkg = os.path.join(self.dist_dir, 'lpkg')
        os.symlink('pkg', linked_pkg)
        assert os.path.isdir(linked_pkg)
        assert 'lpkg' in find_packages(self.dist_dir)

    def _assert_packages(self, actual, expected):
        # Discovery order is unspecified; compare as sets.
        assert set(actual) == set(expected)

    def test_pep420_ns_package(self):
        found = find_420_packages(
            self.dist_dir, include=['pkg*'], exclude=['pkg.subpkg.assets'])
        self._assert_packages(found, ['pkg', 'pkg.nspkg', 'pkg.subpkg'])

    def test_pep420_ns_package_no_includes(self):
        found = find_420_packages(
            self.dist_dir, exclude=['pkg.subpkg.assets'])
        self._assert_packages(
            found, ['docs', 'pkg', 'pkg.nspkg', 'pkg.subpkg'])

    def test_pep420_ns_package_no_includes_or_excludes(self):
        found = find_420_packages(self.dist_dir)
        expected = [
            'docs', 'pkg', 'pkg.nspkg', 'pkg.subpkg', 'pkg.subpkg.assets']
        self._assert_packages(found, expected)

    def test_regular_package_with_nested_pep420_ns_packages(self):
        self._touch('__init__.py', self.pkg_dir)
        found = find_420_packages(
            self.dist_dir, exclude=['docs', 'pkg.subpkg.assets'])
        self._assert_packages(found, ['pkg', 'pkg.nspkg', 'pkg.subpkg'])

    def test_pep420_ns_package_no_non_package_dirs(self):
        shutil.rmtree(self.docs_dir)
        shutil.rmtree(os.path.join(self.dist_dir, 'pkg/subpkg/assets'))
        found = find_420_packages(self.dist_dir)
        self._assert_packages(found, ['pkg', 'pkg.nspkg', 'pkg.subpkg'])
diff --git a/setuptools/tests/test_install_scripts.py b/setuptools/tests/test_install_scripts.py
new file mode 100644
index 0000000..7393241
--- /dev/null
+++ b/setuptools/tests/test_install_scripts.py
@@ -0,0 +1,88 @@
+"""install_scripts tests
+"""
+
+import io
+import sys
+
+import pytest
+
+from setuptools.command.install_scripts import install_scripts
+from setuptools.dist import Distribution
+from . import contexts
+
+
class TestInstallScripts:
    """Check the shebang lines produced by the install_scripts command."""

    settings = dict(
        name='foo',
        entry_points={'console_scripts': ['foo=foo:foo']},
        version='0.0',
    )
    unix_exe = '/usr/dummy-test-path/local/bin/python'
    unix_spaces_exe = '/usr/bin/env dummy-test-python'
    win32_exe = 'C:\\Dummy Test Path\\Program Files\\Python 3.3\\python.exe'

    def _run_install_scripts(self, install_dir, executable=None):
        # Drive a minimal distribution through install_scripts, optionally
        # overriding the interpreter used by build_scripts.
        dist = Distribution(self.settings)
        dist.script_name = 'setup.py'
        cmd = install_scripts(dist)
        cmd.install_dir = install_dir
        if executable is not None:
            cmd.get_finalized_command('build_scripts').executable = executable
        cmd.ensure_finalized()
        with contexts.quiet():
            cmd.run()

    @staticmethod
    def _first_line(path):
        # Return the first line (the shebang) of the generated script.
        with io.open(path, 'r') as strm:
            return strm.readline()

    @pytest.mark.skipif(sys.platform == 'win32', reason='non-Windows only')
    def test_sys_executable_escaping_unix(self, tmpdir, monkeypatch):
        """
        Ensure that shebang is not quoted on Unix when getting the Python exe
        from sys.executable.
        """
        monkeypatch.setattr('sys.executable', self.unix_exe)
        with tmpdir.as_cwd():
            self._run_install_scripts(str(tmpdir))
            actual = self._first_line(str(tmpdir.join('foo')))
        assert actual == '#!%s\n' % self.unix_exe

    @pytest.mark.skipif(sys.platform != 'win32', reason='Windows only')
    def test_sys_executable_escaping_win32(self, tmpdir, monkeypatch):
        """
        Ensure that shebang is quoted on Windows when getting the Python exe
        from sys.executable and it contains a space.
        """
        monkeypatch.setattr('sys.executable', self.win32_exe)
        with tmpdir.as_cwd():
            self._run_install_scripts(str(tmpdir))
            actual = self._first_line(str(tmpdir.join('foo-script.py')))
        assert actual == '#!"%s"\n' % self.win32_exe

    @pytest.mark.skipif(sys.platform == 'win32', reason='non-Windows only')
    def test_executable_with_spaces_escaping_unix(self, tmpdir):
        """
        Ensure that shebang on Unix is not quoted, even when a value with spaces
        is specified using --executable.
        """
        with tmpdir.as_cwd():
            self._run_install_scripts(str(tmpdir), self.unix_spaces_exe)
            actual = self._first_line(str(tmpdir.join('foo')))
        assert actual == '#!%s\n' % self.unix_spaces_exe

    @pytest.mark.skipif(sys.platform != 'win32', reason='Windows only')
    def test_executable_arg_escaping_win32(self, tmpdir):
        """
        Ensure that shebang on Windows is quoted when getting a path with spaces
        from --executable, that is itself properly quoted.
        """
        with tmpdir.as_cwd():
            self._run_install_scripts(str(tmpdir), '"' + self.win32_exe + '"')
            actual = self._first_line(str(tmpdir.join('foo-script.py')))
        assert actual == '#!"%s"\n' % self.win32_exe
diff --git a/setuptools/tests/test_integration.py b/setuptools/tests/test_integration.py
new file mode 100644
index 0000000..3a9a6c5
--- /dev/null
+++ b/setuptools/tests/test_integration.py
@@ -0,0 +1,165 @@
+"""Run some integration tests.
+
+Try to install a few packages.
+"""
+
+import glob
+import os
+import sys
+
+from setuptools.extern.six.moves import urllib
+import pytest
+
+from setuptools.command.easy_install import easy_install
+from setuptools.command import easy_install as easy_install_pkg
+from setuptools.dist import Distribution
+
+
def setup_module(module):
    """
    Skip the entire module when any of the packages the tests install is
    already importable, or when the package index is unreachable.
    """
    for pkg in ('stevedore', 'virtualenvwrapper', 'pbr', 'novaclient'):
        try:
            __import__(pkg)
        except ImportError:
            continue
        tmpl = "Integration tests cannot run when {pkg} is installed"
        pytest.skip(tmpl.format(**locals()))

    # NOTE(review): pypi.python.org is the legacy endpoint; pypi.org is
    # the current host — confirm before relying on this reachability check.
    try:
        urllib.request.urlopen('https://pypi.python.org/pypi')
    except Exception as exc:
        pytest.skip(str(exc))
+
+
@pytest.fixture
def install_context(request, tmpdir, monkeypatch):
    """Fixture to set up temporary installation directory.
    """
    new_cwd = tmpdir.mkdir('cwd')
    user_base = tmpdir.mkdir('user_base')
    user_site = tmpdir.mkdir('user_site')
    install_dir = tmpdir.mkdir('install_dir')

    def cleanup():
        # undo the monkeypatch first, particularly needed under
        # windows because of kept handle on cwd
        monkeypatch.undo()
        for scratch in (new_cwd, user_base, user_site, install_dir):
            scratch.remove()

    request.addfinalizer(cleanup)

    # Redirect the environment and site settings so installations land in
    # the scratch directories and never touch the real site-packages.
    monkeypatch.chdir(new_cwd)
    monkeypatch.setattr(easy_install_pkg, '__file__', user_site.strpath)
    monkeypatch.setattr('site.USER_BASE', user_base.strpath)
    monkeypatch.setattr('site.USER_SITE', user_site.strpath)
    monkeypatch.setattr('sys.path', sys.path + [install_dir.strpath])
    monkeypatch.setenv('PYTHONPATH', os.path.pathsep.join(sys.path))

    # Build the easy_install command pointed at the scratch install dir.
    dist = Distribution()
    cmd = easy_install(dist)
    cmd.install_dir = install_dir.strpath
    return cmd
+
+
+def _install_one(requirement, cmd, pkgname, modulename):
+    cmd.args = [requirement]
+    cmd.ensure_finalized()
+    cmd.run()
+    target = cmd.install_dir
+    dest_path = glob.glob(os.path.join(target, pkgname + '*.egg'))
+    assert dest_path
+    assert os.path.exists(os.path.join(dest_path[0], pkgname, modulename))
+
+
def test_stevedore(install_context):
    """stevedore installs and ships its extension module."""
    _install_one('stevedore', install_context, 'stevedore', 'extension.py')


@pytest.mark.xfail
def test_virtualenvwrapper(install_context):
    """virtualenvwrapper installs and ships its hook loader."""
    _install_one(
        'virtualenvwrapper', install_context,
        'virtualenvwrapper', 'hook_loader.py')


def test_pbr(install_context):
    """pbr installs and ships its core module."""
    _install_one('pbr', install_context, 'pbr', 'core.py')


@pytest.mark.xfail
def test_python_novaclient(install_context):
    """python-novaclient installs under the 'novaclient' package name."""
    _install_one('python-novaclient', install_context, 'novaclient', 'base.py')
+
+
def test_pyuri(install_context):
    """
    Install the pyuri package (version 0.3.1 at the time of writing).

    This is also a regression test for issue #1016.
    """
    _install_one('pyuri', install_context, 'pyuri', 'uri.py')

    # Non-Python package data (the regex file) must be installed too.
    pyuri = install_context.installed_projects['pyuri']
    data_file = os.path.join(pyuri.location, 'pyuri', 'uri.regex')
    assert os.path.exists(data_file)
+
+
+import re
+import subprocess
+import functools
+import tarfile, zipfile
+
+
# Direct build dependencies vendored by setuptools; each must be
# installable with plain distutils (i.e. without setuptools itself).
build_deps = ['appdirs', 'packaging', 'pyparsing', 'six']


@pytest.mark.parametrize("build_dep", build_deps)
@pytest.mark.skipif(sys.version_info < (3, 6), reason='run only on late versions')
def test_build_deps_on_distutils(request, tmpdir_factory, build_dep):
    """
    All setuptools build dependencies must build without
    setuptools.
    """
    if 'pyparsing' in build_dep:
        pytest.xfail(reason="Project imports setuptools unconditionally")
    build_target = tmpdir_factory.mktemp('source')
    build_dir = download_and_extract(request, build_dep, build_target)
    install_target = tmpdir_factory.mktemp('target')
    output = install(build_dir, install_target)
    # Options plain distutils predictably does not know about; any other
    # unknown option means the dependency actually requires setuptools.
    allowed_unknowns = [
        'test_suite',
        'tests_require',
        'install_requires',
    ]
    for line in output.splitlines():
        match = re.search('Unknown distribution option: (.*)', line)
        assert not match or match.group(1).strip('"\'') in allowed_unknowns
+
+
def install(pkg_dir, install_dir):
    """
    Run ``setup.py install --prefix install_dir`` inside *pkg_dir* and
    return the combined stdout/stderr as text.
    """
    # Plant a fake ``setuptools`` module that blows up on import, so the
    # build can only succeed through plain distutils.
    breaker_path = os.path.join(pkg_dir, 'setuptools.py')
    with open(breaker_path, 'w') as breaker:
        breaker.write('raise ImportError()')
    cmd = [sys.executable, 'setup.py', 'install', '--prefix', install_dir]
    env = dict(os.environ, PYTHONPATH=pkg_dir)
    output = subprocess.check_output(
        cmd, cwd=pkg_dir, env=env, stderr=subprocess.STDOUT)
    return output.decode('utf-8')
+
+
def download_and_extract(request, req, target):
    """
    Download the sdist for *req* with pip, unpack it into *target*, and
    return the path of the unpacked source tree.
    """
    cmd = [
        sys.executable, '-m', 'pip', 'download', '--no-deps',
        '--no-binary', ':all:', req,
    ]
    output = subprocess.check_output(cmd, encoding='utf-8')
    filename = re.search('Saved (.*)', output).group(1)
    # Remove the downloaded archive once the test is done with it.
    request.addfinalizer(functools.partial(os.remove, filename))
    if filename.endswith('.zip'):
        opener = zipfile.ZipFile
    else:
        opener = tarfile.open
    with opener(filename) as archive:
        archive.extractall(target)
    return os.path.join(target, os.listdir(target)[0])
diff --git a/setuptools/tests/test_manifest.py b/setuptools/tests/test_manifest.py
new file mode 100644
index 0000000..65eec7d
--- /dev/null
+++ b/setuptools/tests/test_manifest.py
@@ -0,0 +1,602 @@
+# -*- coding: utf-8 -*-
+"""sdist tests"""
+
+import contextlib
+import os
+import shutil
+import sys
+import tempfile
+import itertools
+from distutils import log
+from distutils.errors import DistutilsTemplateError
+
+import pkg_resources.py31compat
+from setuptools.command.egg_info import FileList, egg_info, translate_pattern
+from setuptools.dist import Distribution
+from setuptools.extern import six
+from setuptools.tests.textwrap import DALS
+
+import pytest
+
+py3_only = pytest.mark.xfail(six.PY2, reason="Test runs on Python 3 only")
+
+
def make_local_path(s):
    """Converts '/' in a string to os.sep"""
    return os.sep.join(s.split('/'))
+
+
# Minimal distribution metadata shared by every manifest scenario below.
SETUP_ATTRS = {
    'name': 'app',
    'version': '0.0',
    'packages': ['app'],
}

# A setup.py reproducing SETUP_ATTRS verbatim via %r interpolation.
SETUP_PY = """\
from setuptools import setup

setup(**%r)
""" % SETUP_ATTRS
+
+
@contextlib.contextmanager
def quiet():
    """Temporarily redirect stdout/stderr into throwaway buffers."""
    saved = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = six.StringIO(), six.StringIO()
    try:
        yield
    finally:
        sys.stdout, sys.stderr = saved
+
+
def touch(filename):
    """Create an empty file at *filename* (truncating any existing one)."""
    with open(filename, 'w'):
        pass
+
+
# The set of files always in the manifest, including all files in the
# .egg-info directory. Every test below expresses its expectation as
# default_files plus/minus the files its MANIFEST.in adds or removes.
default_files = frozenset(map(make_local_path, [
    'README.rst',
    'MANIFEST.in',
    'setup.py',
    'app.egg-info/PKG-INFO',
    'app.egg-info/SOURCES.txt',
    'app.egg-info/dependency_links.txt',
    'app.egg-info/top_level.txt',
    'app/__init__.py',
]))


# Each entry is (pattern, paths that must match, paths that must not).
translate_specs = [
    ('foo', ['foo'], ['bar', 'foobar']),
    ('foo/bar', ['foo/bar'], ['foo/bar/baz', './foo/bar', 'foo']),

    # Glob matching
    ('*.txt', ['foo.txt', 'bar.txt'], ['foo/foo.txt']),
    ('dir/*.txt', ['dir/foo.txt', 'dir/bar.txt', 'dir/.txt'], ['notdir/foo.txt']),
    ('*/*.py', ['bin/start.py'], []),
    ('docs/page-?.txt', ['docs/page-9.txt'], ['docs/page-10.txt']),

    # Globstars change what they mean depending upon where they are
    (
        'foo/**/bar',
        ['foo/bing/bar', 'foo/bing/bang/bar', 'foo/bar'],
        ['foo/abar'],
    ),
    (
        'foo/**',
        ['foo/bar/bing.py', 'foo/x'],
        ['/foo/x'],
    ),
    (
        '**',
        ['x', 'abc/xyz', '@nything'],
        [],
    ),

    # Character classes
    (
        'pre[one]post',
        ['preopost', 'prenpost', 'preepost'],
        ['prepost', 'preonepost'],
    ),

    (
        'hello[!one]world',
        ['helloxworld', 'helloyworld'],
        ['hellooworld', 'helloworld', 'hellooneworld'],
    ),

    (
        '[]one].txt',
        ['o.txt', '].txt', 'e.txt'],
        ['one].txt'],
    ),

    (
        'foo[!]one]bar',
        ['fooybar'],
        ['foo]bar', 'fooobar', 'fooebar'],
    ),

]
"""
A spec of inputs for 'translate_pattern' and matches and mismatches
for that input.
"""
+
# Flatten translate_specs into (pattern, matching path) pairs; consumed
# once by pytest at collection time when building the fixture params.
match_params = itertools.chain.from_iterable(
    zip(itertools.repeat(pattern), matches)
    for pattern, matches, mismatches in translate_specs
)


@pytest.fixture(params=match_params)
def pattern_match(request):
    """Yield one (pattern, path) pair expected to match, OS-localized."""
    return map(make_local_path, request.param)


# Flatten translate_specs into (pattern, non-matching path) pairs.
mismatch_params = itertools.chain.from_iterable(
    zip(itertools.repeat(pattern), mismatches)
    for pattern, matches, mismatches in translate_specs
)


@pytest.fixture(params=mismatch_params)
def pattern_mismatch(request):
    """Yield one (pattern, path) pair expected NOT to match, OS-localized."""
    return map(make_local_path, request.param)
+
+
def test_translated_pattern_match(pattern_match):
    """Each (pattern, path) pair from translate_specs must match."""
    pattern, target = pattern_match
    assert translate_pattern(pattern).match(target)


def test_translated_pattern_mismatch(pattern_mismatch):
    """Each (pattern, path) mismatch pair from translate_specs must not match."""
    pattern, target = pattern_mismatch
    assert not translate_pattern(pattern).match(target)
+
+
class TempDirTestCase(object):
    """Base class running each test method from inside a scratch directory."""

    def setup_method(self, method):
        # Remember where we were, then hop into a fresh temp dir.
        self.old_cwd = os.getcwd()
        self.temp_dir = tempfile.mkdtemp()
        os.chdir(self.temp_dir)

    def teardown_method(self, method):
        # Leave the scratch dir before deleting it (required on Windows).
        os.chdir(self.old_cwd)
        shutil.rmtree(self.temp_dir)
+
+
class TestManifestTest(TempDirTestCase):
    """Exercise MANIFEST.in handling against a small sample project."""

    def setup_method(self, method):
        """
        Create a file tree like:
        - LICENSE
        - README.rst
        - testing.rst
        - .hidden.rst
        - app/
            - __init__.py
            - a.txt
            - b.txt
            - c.rst
            - static/
                - app.js
                - app.js.map
                - app.css
                - app.css.map
        """
        super(TestManifestTest, self).setup_method(method)

        # Write and promptly close setup.py (a with-block instead of the
        # original open/close pair, so the handle is released on Windows).
        with open(os.path.join(self.temp_dir, 'setup.py'), 'w') as f:
            f.write(SETUP_PY)

        for fname in ['README.rst', '.hidden.rst', 'testing.rst', 'LICENSE']:
            touch(os.path.join(self.temp_dir, fname))

        # Set up the rest of the test package
        test_pkg = os.path.join(self.temp_dir, 'app')
        os.mkdir(test_pkg)
        for fname in ['__init__.py', 'a.txt', 'b.txt', 'c.rst']:
            touch(os.path.join(test_pkg, fname))

        # Some compiled front-end assets to include
        static = os.path.join(test_pkg, 'static')
        os.mkdir(static)
        for fname in ['app.js', 'app.js.map', 'app.css', 'app.css.map']:
            touch(os.path.join(static, fname))

    def make_manifest(self, contents):
        """Write a MANIFEST.in."""
        with open(os.path.join(self.temp_dir, 'MANIFEST.in'), 'w') as f:
            f.write(DALS(contents))

    def get_files(self):
        """Run egg_info and get all the files to include, as a set"""
        dist = Distribution(SETUP_ATTRS)
        dist.script_name = 'setup.py'
        cmd = egg_info(dist)
        cmd.ensure_finalized()

        cmd.run()

        return set(cmd.filelist.files)

    def test_no_manifest(self):
        """Check a missing MANIFEST.in includes only the standard files."""
        assert (default_files - set(['MANIFEST.in'])) == self.get_files()

    def test_empty_files(self):
        """Check an empty MANIFEST.in includes only the standard files."""
        self.make_manifest("")
        assert default_files == self.get_files()

    def test_include(self):
        """Include extra rst files in the project root."""
        self.make_manifest("include *.rst")
        files = default_files | set([
            'testing.rst', '.hidden.rst'])
        assert files == self.get_files()

    def test_exclude(self):
        """Include everything in app/ except the text files"""
        l = make_local_path
        self.make_manifest(
            """
            include app/*
            exclude app/*.txt
            """)
        files = default_files | set([l('app/c.rst')])
        assert files == self.get_files()

    def test_include_multiple(self):
        """Include with multiple patterns."""
        l = make_local_path
        self.make_manifest("include app/*.txt app/static/*")
        files = default_files | set([
            l('app/a.txt'), l('app/b.txt'),
            l('app/static/app.js'), l('app/static/app.js.map'),
            l('app/static/app.css'), l('app/static/app.css.map')])
        assert files == self.get_files()

    def test_graft(self):
        """Include the whole app/static/ directory."""
        l = make_local_path
        self.make_manifest("graft app/static")
        files = default_files | set([
            l('app/static/app.js'), l('app/static/app.js.map'),
            l('app/static/app.css'), l('app/static/app.css.map')])
        assert files == self.get_files()

    def test_graft_glob_syntax(self):
        """Include the whole app/static/ directory."""
        l = make_local_path
        self.make_manifest("graft */static")
        files = default_files | set([
            l('app/static/app.js'), l('app/static/app.js.map'),
            l('app/static/app.css'), l('app/static/app.css.map')])
        assert files == self.get_files()

    def test_graft_global_exclude(self):
        """Exclude all *.map files in the project."""
        l = make_local_path
        self.make_manifest(
            """
            graft app/static
            global-exclude *.map
            """)
        files = default_files | set([
            l('app/static/app.js'), l('app/static/app.css')])
        assert files == self.get_files()

    def test_global_include(self):
        """Include all *.rst, *.js, and *.css files in the whole tree."""
        l = make_local_path
        self.make_manifest(
            """
            global-include *.rst *.js *.css
            """)
        files = default_files | set([
            '.hidden.rst', 'testing.rst', l('app/c.rst'),
            l('app/static/app.js'), l('app/static/app.css')])
        assert files == self.get_files()

    def test_graft_prune(self):
        """Include all files in app/, except for the whole app/static/ dir."""
        l = make_local_path
        self.make_manifest(
            """
            graft app
            prune app/static
            """)
        files = default_files | set([
            l('app/a.txt'), l('app/b.txt'), l('app/c.rst')])
        assert files == self.get_files()
+
+
class TestFileListTest(TempDirTestCase):
    """
    A copy of the relevant bits of distutils/tests/test_filelist.py,
    to ensure setuptools' version of FileList keeps parity with distutils.
    """

    def setup_method(self, method):
        # Silence distutils logging and capture records through _log so the
        # assertNoWarnings/assertWarnings helpers can inspect them later.
        super(TestFileListTest, self).setup_method(method)
        self.threshold = log.set_threshold(log.FATAL)
        self._old_log = log.Log._log
        log.Log._log = self._log
        self.logs = []

    def teardown_method(self, method):
        # Restore the distutils logging hooks patched in setup_method.
        log.set_threshold(self.threshold)
        log.Log._log = self._old_log
        super(TestFileListTest, self).teardown_method(method)

    def _log(self, level, msg, args):
        # Replacement for distutils.log.Log._log that records each entry.
        if level not in (log.DEBUG, log.INFO, log.WARN, log.ERROR, log.FATAL):
            raise ValueError('%s wrong log level' % str(level))
        self.logs.append((level, msg, args))

    def get_logs(self, *levels):
        # Return the formatted messages recorded at any of *levels*.
        def _format(msg, args):
            if len(args) == 0:
                return msg
            return msg % args
        return [_format(msg, args) for level, msg, args
                in self.logs if level in levels]

    def clear_logs(self):
        self.logs = []

    def assertNoWarnings(self):
        # Assert nothing was logged at WARN level, then reset the record.
        assert self.get_logs(log.WARN) == []
        self.clear_logs()

    def assertWarnings(self):
        # Assert at least one WARN-level entry was logged, then reset.
        assert len(self.get_logs(log.WARN)) > 0
        self.clear_logs()

    def make_files(self, files):
        # Create each (possibly nested) file relative to the temp dir.
        for file in files:
            file = os.path.join(self.temp_dir, file)
            dirname, basename = os.path.split(file)
            pkg_resources.py31compat.makedirs(dirname, exist_ok=True)
            open(file, 'w').close()

    def test_process_template_line(self):
        # testing  all MANIFEST.in template patterns
        file_list = FileList()
        l = make_local_path

        # simulated file list
        self.make_files([
            'foo.tmp', 'ok', 'xo', 'four.txt',
            'buildout.cfg',
            # filelist does not filter out VCS directories,
            # it's sdist that does
            l('.hg/last-message.txt'),
            l('global/one.txt'),
            l('global/two.txt'),
            l('global/files.x'),
            l('global/here.tmp'),
            l('f/o/f.oo'),
            l('dir/graft-one'),
            l('dir/dir2/graft2'),
            l('dir3/ok'),
            l('dir3/sub/ok.txt'),
        ])

        MANIFEST_IN = DALS("""\
        include ok
        include xo
        exclude xo
        include foo.tmp
        include buildout.cfg
        global-include *.x
        global-include *.txt
        global-exclude *.tmp
        recursive-include f *.oo
        recursive-exclude global *.x
        graft dir
        prune dir3
        """)

        for line in MANIFEST_IN.split('\n'):
            if not line:
                continue
            file_list.process_template_line(line)

        wanted = [
            'buildout.cfg',
            'four.txt',
            'ok',
            l('.hg/last-message.txt'),
            l('dir/graft-one'),
            l('dir/dir2/graft2'),
            l('f/o/f.oo'),
            l('global/one.txt'),
            l('global/two.txt'),
        ]

        file_list.sort()
        assert file_list.files == wanted

    def test_exclude_pattern(self):
        # return False if no match
        file_list = FileList()
        assert not file_list.exclude_pattern('*.py')

        # return True if files match
        file_list = FileList()
        file_list.files = ['a.py', 'b.py']
        assert file_list.exclude_pattern('*.py')

        # test excludes
        file_list = FileList()
        file_list.files = ['a.py', 'a.txt']
        file_list.exclude_pattern('*.py')
        file_list.sort()
        assert file_list.files == ['a.txt']

    def test_include_pattern(self):
        # return False if no match
        file_list = FileList()
        self.make_files([])
        assert not file_list.include_pattern('*.py')

        # return True if files match
        file_list = FileList()
        self.make_files(['a.py', 'b.txt'])
        assert file_list.include_pattern('*.py')

        # test * matches all files
        file_list = FileList()
        self.make_files(['a.py', 'b.txt'])
        file_list.include_pattern('*')
        file_list.sort()
        assert file_list.files == ['a.py', 'b.txt']

    def test_process_template_line_invalid(self):
        # invalid lines
        file_list = FileList()
        for action in ('include', 'exclude', 'global-include',
                       'global-exclude', 'recursive-include',
                       'recursive-exclude', 'graft', 'prune', 'blarg'):
            try:
                file_list.process_template_line(action)
            except DistutilsTemplateError:
                pass
            except Exception:
                assert False, "Incorrect error thrown"
            else:
                assert False, "Should have thrown an error"

    def test_include(self):
        l = make_local_path
        # include
        file_list = FileList()
        self.make_files(['a.py', 'b.txt', l('d/c.py')])

        file_list.process_template_line('include *.py')
        file_list.sort()
        assert file_list.files == ['a.py']
        self.assertNoWarnings()

        file_list.process_template_line('include *.rb')
        file_list.sort()
        assert file_list.files == ['a.py']
        self.assertWarnings()

    def test_exclude(self):
        l = make_local_path
        # exclude
        file_list = FileList()
        file_list.files = ['a.py', 'b.txt', l('d/c.py')]

        file_list.process_template_line('exclude *.py')
        file_list.sort()
        assert file_list.files == ['b.txt', l('d/c.py')]
        self.assertNoWarnings()

        file_list.process_template_line('exclude *.rb')
        file_list.sort()
        assert file_list.files == ['b.txt', l('d/c.py')]
        self.assertWarnings()

    def test_global_include(self):
        l = make_local_path
        # global-include
        file_list = FileList()
        self.make_files(['a.py', 'b.txt', l('d/c.py')])

        file_list.process_template_line('global-include *.py')
        file_list.sort()
        assert file_list.files == ['a.py', l('d/c.py')]
        self.assertNoWarnings()

        file_list.process_template_line('global-include *.rb')
        file_list.sort()
        assert file_list.files == ['a.py', l('d/c.py')]
        self.assertWarnings()

    def test_global_exclude(self):
        l = make_local_path
        # global-exclude
        file_list = FileList()
        file_list.files = ['a.py', 'b.txt', l('d/c.py')]

        file_list.process_template_line('global-exclude *.py')
        file_list.sort()
        assert file_list.files == ['b.txt']
        self.assertNoWarnings()

        file_list.process_template_line('global-exclude *.rb')
        file_list.sort()
        assert file_list.files == ['b.txt']
        self.assertWarnings()

    def test_recursive_include(self):
        l = make_local_path
        # recursive-include
        file_list = FileList()
        self.make_files(['a.py', l('d/b.py'), l('d/c.txt'), l('d/d/e.py')])

        file_list.process_template_line('recursive-include d *.py')
        file_list.sort()
        assert file_list.files == [l('d/b.py'), l('d/d/e.py')]
        self.assertNoWarnings()

        file_list.process_template_line('recursive-include e *.py')
        file_list.sort()
        assert file_list.files == [l('d/b.py'), l('d/d/e.py')]
        self.assertWarnings()

    def test_recursive_exclude(self):
        l = make_local_path
        # recursive-exclude
        file_list = FileList()
        file_list.files = ['a.py', l('d/b.py'), l('d/c.txt'), l('d/d/e.py')]

        file_list.process_template_line('recursive-exclude d *.py')
        file_list.sort()
        assert file_list.files == ['a.py', l('d/c.txt')]
        self.assertNoWarnings()

        file_list.process_template_line('recursive-exclude e *.py')
        file_list.sort()
        assert file_list.files == ['a.py', l('d/c.txt')]
        self.assertWarnings()

    def test_graft(self):
        l = make_local_path
        # graft
        file_list = FileList()
        self.make_files(['a.py', l('d/b.py'), l('d/d/e.py'), l('f/f.py')])

        file_list.process_template_line('graft d')
        file_list.sort()
        assert file_list.files == [l('d/b.py'), l('d/d/e.py')]
        self.assertNoWarnings()

        file_list.process_template_line('graft e')
        file_list.sort()
        assert file_list.files == [l('d/b.py'), l('d/d/e.py')]
        self.assertWarnings()

    def test_prune(self):
        l = make_local_path
        # prune
        file_list = FileList()
        file_list.files = ['a.py', l('d/b.py'), l('d/d/e.py'), l('f/f.py')]

        file_list.process_template_line('prune d')
        file_list.sort()
        assert file_list.files == ['a.py', l('f/f.py')]
        self.assertNoWarnings()

        file_list.process_template_line('prune e')
        file_list.sort()
        assert file_list.files == ['a.py', l('f/f.py')]
        self.assertWarnings()
diff --git a/setuptools/tests/test_msvc.py b/setuptools/tests/test_msvc.py
new file mode 100644
index 0000000..32d7a90
--- /dev/null
+++ b/setuptools/tests/test_msvc.py
@@ -0,0 +1,178 @@
+"""
+Tests for msvc support module.
+"""
+
+import os
+import contextlib
+import distutils.errors
+import mock
+
+import pytest
+
+from . import contexts
+
+# importing only setuptools should apply the patch
+__import__('setuptools')
+
+pytest.importorskip("distutils.msvc9compiler")
+
+
def mock_reg(hkcu=None, hklm=None):
    """
    Return a mock for distutils.msvc9compiler.Reg, patched
    to mock out the functions that access the registry.
    """
    # Python 2 exposes the module as _winreg, Python 3 as winreg.
    _winreg = getattr(distutils.msvc9compiler, '_winreg', None)
    winreg = getattr(distutils.msvc9compiler, 'winreg', _winreg)

    # Fake registry hives backing the patched readers below.
    hives = {
        winreg.HKEY_CURRENT_USER: hkcu or {},
        winreg.HKEY_LOCAL_MACHINE: hklm or {},
    }

    @classmethod
    def read_keys(cls, base, key):
        """Return list of registry keys."""
        hive = hives.get(base, {})
        prefix = key.lower()
        return [
            k.rpartition('\\')[2]
            for k in hive
            if k.startswith(prefix)
        ]

    @classmethod
    def read_values(cls, base, key):
        """Return dict of registry keys and values."""
        hive = hives.get(base, {})
        prefix = key.lower()
        return {
            k.rpartition('\\')[2]: hive[k]
            for k in hive
            if k.startswith(prefix)
        }

    return mock.patch.multiple(
        distutils.msvc9compiler.Reg,
        read_keys=read_keys,
        read_values=read_values,
    )
+
+
class TestModulePatch:
    """
    Ensure that importing setuptools is sufficient to replace
    the standard find_vcvarsall function with a version that
    recognizes the "Visual C++ for Python" package.
    """

    # Registry paths (lower-cased, as the mock hives expect) where the
    # VC-for-Python install dir is recorded on 32- and 64-bit systems.
    key_32 = r'software\microsoft\devdiv\vcforpython\9.0\installdir'
    key_64 = r'software\wow6432node\microsoft\devdiv\vcforpython\9.0\installdir'

    def test_patched(self):
        "Test the module is actually patched"
        mod_name = distutils.msvc9compiler.find_vcvarsall.__module__
        assert mod_name == "setuptools.msvc", "find_vcvarsall unpatched"

    def test_no_registry_entries_means_nothing_found(self):
        """
        No registry entries or environment variable should lead to an error
        directing the user to download vcpython27.
        """
        find_vcvarsall = distutils.msvc9compiler.find_vcvarsall
        query_vcvarsall = distutils.msvc9compiler.query_vcvarsall

        with contexts.environment(VS90COMNTOOLS=None):
            with mock_reg():
                assert find_vcvarsall(9.0) is None

                # NOTE(review): if query_vcvarsall unexpectedly succeeds,
                # nothing fails here — pytest.raises would be stricter.
                try:
                    query_vcvarsall(9.0)
                except Exception as exc:
                    expected = distutils.errors.DistutilsPlatformError
                    assert isinstance(exc, expected)
                    assert 'aka.ms/vcpython27' in str(exc)

    @pytest.yield_fixture
    def user_preferred_setting(self):
        """
        Set up environment with different install dirs for user vs. system
        and yield the user_install_dir for the expected result.
        """
        with self.mock_install_dir() as user_install_dir:
            with self.mock_install_dir() as system_install_dir:
                reg = mock_reg(
                    hkcu={
                        self.key_32: user_install_dir,
                    },
                    hklm={
                        self.key_32: system_install_dir,
                        self.key_64: system_install_dir,
                    },
                )
                with reg:
                    yield user_install_dir

    def test_prefer_current_user(self, user_preferred_setting):
        """
        Ensure user's settings are preferred.
        """
        result = distutils.msvc9compiler.find_vcvarsall(9.0)
        expected = os.path.join(user_preferred_setting, 'vcvarsall.bat')
        assert expected == result

    @pytest.yield_fixture
    def local_machine_setting(self):
        """
        Set up environment with only the system environment configured.
        """
        with self.mock_install_dir() as system_install_dir:
            reg = mock_reg(
                hklm={
                    self.key_32: system_install_dir,
                },
            )
            with reg:
                yield system_install_dir

    def test_local_machine_recognized(self, local_machine_setting):
        """
        Ensure machine setting is honored if user settings are not present.
        """
        result = distutils.msvc9compiler.find_vcvarsall(9.0)
        expected = os.path.join(local_machine_setting, 'vcvarsall.bat')
        assert expected == result

    @pytest.yield_fixture
    def x64_preferred_setting(self):
        """
        Set up environment with 64-bit and 32-bit system settings configured
        and yield the canonical location.
        """
        with self.mock_install_dir() as x32_dir:
            with self.mock_install_dir() as x64_dir:
                reg = mock_reg(
                    hklm={
                        # This *should* only exist on 32-bit machines
                        self.key_32: x32_dir,
                        # This *should* only exist on 64-bit machines
                        self.key_64: x64_dir,
                    },
                )
                with reg:
                    yield x32_dir

    def test_ensure_64_bit_preferred(self, x64_preferred_setting):
        """
        Ensure 64-bit system key is preferred.
        """
        result = distutils.msvc9compiler.find_vcvarsall(9.0)
        expected = os.path.join(x64_preferred_setting, 'vcvarsall.bat')
        assert expected == result

    @staticmethod
    @contextlib.contextmanager
    def mock_install_dir():
        """
        Make a mock install dir in a unique location so that tests can
        distinguish which dir was detected in a given scenario.
        """
        with contexts.tempdir() as result:
            vcvarsall = os.path.join(result, 'vcvarsall.bat')
            with open(vcvarsall, 'w'):
                pass
            yield result
diff --git a/setuptools/tests/test_namespaces.py b/setuptools/tests/test_namespaces.py
new file mode 100644
index 0000000..1ac1b35
--- /dev/null
+++ b/setuptools/tests/test_namespaces.py
@@ -0,0 +1,111 @@
+from __future__ import absolute_import, unicode_literals
+
+import os
+import sys
+import subprocess
+
+import pytest
+
+from . import namespaces
+from setuptools.command import test
+
+
+class TestNamespaces:
+
+    @pytest.mark.xfail(sys.version_info < (3, 5),
+        reason="Requires importlib.util.module_from_spec")
+    @pytest.mark.skipif(bool(os.environ.get("APPVEYOR")),
+        reason="https://github.com/pypa/setuptools/issues/851")
+    def test_mixed_site_and_non_site(self, tmpdir):
+        """
+        Installing two packages sharing the same namespace, one installed
+        to a site dir and the other installed just to a path on PYTHONPATH
+        should leave the namespace intact and both packages reachable by
+        import.
+        """
+        pkg_A = namespaces.build_namespace_package(tmpdir, 'myns.pkgA')
+        pkg_B = namespaces.build_namespace_package(tmpdir, 'myns.pkgB')
+        site_packages = tmpdir / 'site-packages'
+        path_packages = tmpdir / 'path-packages'
+        targets = site_packages, path_packages
+        # use pip to install to the target directory
+        install_cmd = [
+            sys.executable,
+            '-m',
+            'pip.__main__',
+            'install',
+            str(pkg_A),
+            '-t', str(site_packages),
+        ]
+        subprocess.check_call(install_cmd)
+        namespaces.make_site_dir(site_packages)
+        install_cmd = [
+            sys.executable,
+            '-m',
+            'pip.__main__',
+            'install',
+            str(pkg_B),
+            '-t', str(path_packages),
+        ]
+        subprocess.check_call(install_cmd)
+        try_import = [
+            sys.executable,
+            '-c', 'import myns.pkgA; import myns.pkgB',
+        ]
+        with test.test.paths_on_pythonpath(map(str, targets)):
+            subprocess.check_call(try_import)
+
+    @pytest.mark.skipif(bool(os.environ.get("APPVEYOR")),
+        reason="https://github.com/pypa/setuptools/issues/851")
+    def test_pkg_resources_import(self, tmpdir):
+        """
+        Ensure that a namespace package doesn't break on import
+        of pkg_resources.
+        """
+        pkg = namespaces.build_namespace_package(tmpdir, 'myns.pkgA')
+        target = tmpdir / 'packages'
+        target.mkdir()
+        install_cmd = [
+            sys.executable,
+            '-m', 'easy_install',
+            '-d', str(target),
+            str(pkg),
+        ]
+        with test.test.paths_on_pythonpath([str(target)]):
+            subprocess.check_call(install_cmd)
+        namespaces.make_site_dir(target)
+        try_import = [
+            sys.executable,
+            '-c', 'import pkg_resources',
+        ]
+        with test.test.paths_on_pythonpath([str(target)]):
+            subprocess.check_call(try_import)
+
+    @pytest.mark.skipif(bool(os.environ.get("APPVEYOR")),
+        reason="https://github.com/pypa/setuptools/issues/851")
+    def test_namespace_package_installed_and_cwd(self, tmpdir):
+        """
+        Installing a namespace package while also having it in the current
+        working directory; only one version should take precedence.
+        """
+        pkg_A = namespaces.build_namespace_package(tmpdir, 'myns.pkgA')
+        target = tmpdir / 'packages'
+        # use pip to install to the target directory
+        install_cmd = [
+            sys.executable,
+            '-m',
+            'pip.__main__',
+            'install',
+            str(pkg_A),
+            '-t', str(target),
+        ]
+        subprocess.check_call(install_cmd)
+        namespaces.make_site_dir(target)
+
+        # ensure that package imports and pkg_resources imports
+        pkg_resources_imp = [
+            sys.executable,
+            '-c', 'import pkg_resources; import myns.pkgA',
+        ]
+        with test.test.paths_on_pythonpath([str(target)]):
+            subprocess.check_call(pkg_resources_imp, cwd=str(pkg_A))
diff --git a/setuptools/tests/test_packageindex.py b/setuptools/tests/test_packageindex.py
new file mode 100644
index 0000000..63b9294
--- /dev/null
+++ b/setuptools/tests/test_packageindex.py
@@ -0,0 +1,275 @@
+from __future__ import absolute_import
+
+import sys
+import os
+import distutils.errors
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import urllib, http_client
+
+import pkg_resources
+import setuptools.package_index
+from setuptools.tests.server import IndexServer
+from .textwrap import DALS
+
+
+class TestPackageIndex:
+    def test_regex(self):
+        hash_url = 'http://other_url?:action=show_md5&amp;'
+        hash_url += 'digest=0123456789abcdef0123456789abcdef'
+        doc = """
+            <a href="http://some_url">Name</a>
+            (<a title="MD5 hash"
+            href="{hash_url}">md5</a>)
+        """.lstrip().format(**locals())
+        assert setuptools.package_index.PYPI_MD5.match(doc)
+
+    def test_bad_url_bad_port(self):
+        index = setuptools.package_index.PackageIndex()
+        url = 'http://127.0.0.1:0/nonesuch/test_package_index'
+        try:
+            v = index.open_url(url)
+        except Exception as v:
+            assert url in str(v)
+        else:
+            assert isinstance(v, urllib.error.HTTPError)
+
+    def test_bad_url_typo(self):
+        # issue 16
+        # easy_install inquant.contentmirror.plone breaks because of a typo
+        # in its home URL
+        index = setuptools.package_index.PackageIndex(
+            hosts=('www.example.com',)
+        )
+
+        url = 'url:%20https://svn.plone.org/svn/collective/inquant.contentmirror.plone/trunk'
+        try:
+            v = index.open_url(url)
+        except Exception as v:
+            assert url in str(v)
+        else:
+            assert isinstance(v, urllib.error.HTTPError)
+
+    def test_bad_url_bad_status_line(self):
+        index = setuptools.package_index.PackageIndex(
+            hosts=('www.example.com',)
+        )
+
+        def _urlopen(*args):
+            raise http_client.BadStatusLine('line')
+
+        index.opener = _urlopen
+        url = 'http://example.com'
+        try:
+            v = index.open_url(url)
+        except Exception as v:
+            assert 'line' in str(v)
+        else:
+            raise AssertionError('Should have raised here!')
+
+    def test_bad_url_double_scheme(self):
+        """
+        A bad URL with a double scheme should raise a DistutilsError.
+        """
+        index = setuptools.package_index.PackageIndex(
+            hosts=('www.example.com',)
+        )
+
+        # issue 20
+        url = 'http://http://svn.pythonpaste.org/Paste/wphp/trunk'
+        try:
+            index.open_url(url)
+        except distutils.errors.DistutilsError as error:
+            msg = six.text_type(error)
+            assert 'nonnumeric port' in msg or 'getaddrinfo failed' in msg or 'Name or service not known' in msg
+            return
+        raise RuntimeError("Did not raise")
+
+    def test_bad_url_screwy_href(self):
+        index = setuptools.package_index.PackageIndex(
+            hosts=('www.example.com',)
+        )
+
+        # issue #160
+        if sys.version_info[0] == 2 and sys.version_info[1] == 7:
+            # this should not fail
+            url = 'http://example.com'
+            page = ('<a href="http://www.famfamfam.com]('
+                    'http://www.famfamfam.com/">')
+            index.process_index(url, page)
+
+    def test_url_ok(self):
+        index = setuptools.package_index.PackageIndex(
+            hosts=('www.example.com',)
+        )
+        url = 'file:///tmp/test_package_index'
+        assert index.url_ok(url, True)
+
+    def test_links_priority(self):
+        """
+        Download links from the pypi simple index should be used before
+        external download links.
+        https://bitbucket.org/tarek/distribute/issue/163
+
+        Usecase :
+        - someone uploads a package on pypi, a md5 is generated
+        - someone manually copies this link (with the md5 in the url) onto an
+          external page accessible from the package page.
+        - someone reuploads the package (with a different md5)
+        - while easy_installing, an MD5 error occurs because the external link
+          is used
+        -> Setuptools should use the link from pypi, not the external one.
+        """
+        if sys.platform.startswith('java'):
+            # Skip this test on jython because binding to :0 fails
+            return
+
+        # start an index server
+        server = IndexServer()
+        server.start()
+        index_url = server.base_url() + 'test_links_priority/simple/'
+
+        # scan a test index
+        pi = setuptools.package_index.PackageIndex(index_url)
+        requirement = pkg_resources.Requirement.parse('foobar')
+        pi.find_packages(requirement)
+        server.stop()
+
+        # the distribution has been found
+        assert 'foobar' in pi
+        # we have only one link, because links are compared without md5
+        assert len(pi['foobar']) == 1
+        # the link should be from the index
+        assert 'correct_md5' in pi['foobar'][0].location
+
+    def test_parse_bdist_wininst(self):
+        parse = setuptools.package_index.parse_bdist_wininst
+
+        actual = parse('reportlab-2.5.win32-py2.4.exe')
+        expected = 'reportlab-2.5', '2.4', 'win32'
+        assert actual == expected
+
+        actual = parse('reportlab-2.5.win32.exe')
+        expected = 'reportlab-2.5', None, 'win32'
+        assert actual == expected
+
+        actual = parse('reportlab-2.5.win-amd64-py2.7.exe')
+        expected = 'reportlab-2.5', '2.7', 'win-amd64'
+        assert actual == expected
+
+        actual = parse('reportlab-2.5.win-amd64.exe')
+        expected = 'reportlab-2.5', None, 'win-amd64'
+        assert actual == expected
+
+    def test__vcs_split_rev_from_url(self):
+        """
+        Test the basic usage of _vcs_split_rev_from_url
+        """
+        vsrfu = setuptools.package_index.PackageIndex._vcs_split_rev_from_url
+        url, rev = vsrfu('https://example.com/bar@2995')
+        assert url == 'https://example.com/bar'
+        assert rev == '2995'
+
+    def test_local_index(self, tmpdir):
+        """
+        local_open should be able to read an index from the file system.
+        """
+        index_file = tmpdir / 'index.html'
+        with index_file.open('w') as f:
+            f.write('<div>content</div>')
+        url = 'file:' + urllib.request.pathname2url(str(tmpdir)) + '/'
+        res = setuptools.package_index.local_open(url)
+        assert 'content' in res.read()
+
+    def test_egg_fragment(self):
+        """
+        EGG fragments must comply to PEP 440
+        """
+        epoch = [
+            '',
+            '1!',
+        ]
+        releases = [
+            '0',
+            '0.0',
+            '0.0.0',
+        ]
+        pre = [
+            'a0',
+            'b0',
+            'rc0',
+        ]
+        post = [
+            '.post0'
+        ]
+        dev = [
+            '.dev0',
+        ]
+        local = [
+            ('', ''),
+            ('+ubuntu.0', '+ubuntu.0'),
+            ('+ubuntu-0', '+ubuntu.0'),
+            ('+ubuntu_0', '+ubuntu.0'),
+        ]
+        versions = [
+            [''.join([e, r, p, l]) for l in ll]
+            for e in epoch
+            for r in releases
+            for p in sum([pre, post, dev], [''])
+            for ll in local]
+        for v, vc in versions:
+            dists = list(setuptools.package_index.distros_for_url(
+                'http://example.com/example.zip#egg=example-' + v))
+            assert dists[0].version == ''
+            assert dists[1].version == vc
+
+
+class TestContentCheckers:
+    def test_md5(self):
+        checker = setuptools.package_index.HashChecker.from_url(
+            'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
+        checker.feed('You should probably not be using MD5'.encode('ascii'))
+        assert checker.hash.hexdigest() == 'f12895fdffbd45007040d2e44df98478'
+        assert checker.is_valid()
+
+    def test_other_fragment(self):
+        "Content checks should succeed silently if no hash is present"
+        checker = setuptools.package_index.HashChecker.from_url(
+            'http://foo/bar#something%20completely%20different')
+        checker.feed('anything'.encode('ascii'))
+        assert checker.is_valid()
+
+    def test_blank_md5(self):
+        "Content checks should succeed if a hash is empty"
+        checker = setuptools.package_index.HashChecker.from_url(
+            'http://foo/bar#md5=')
+        checker.feed('anything'.encode('ascii'))
+        assert checker.is_valid()
+
+    def test_get_hash_name_md5(self):
+        checker = setuptools.package_index.HashChecker.from_url(
+            'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
+        assert checker.hash_name == 'md5'
+
+    def test_report(self):
+        checker = setuptools.package_index.HashChecker.from_url(
+            'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
+        rep = checker.report(lambda x: x, 'My message about %s')
+        assert rep == 'My message about md5'
+
+
+class TestPyPIConfig:
+    def test_percent_in_password(self, tmpdir, monkeypatch):
+        monkeypatch.setitem(os.environ, 'HOME', str(tmpdir))
+        pypirc = tmpdir / '.pypirc'
+        with pypirc.open('w') as strm:
+            strm.write(DALS("""
+                [pypi]
+                repository=https://pypi.org
+                username=jaraco
+                password=pity%
+            """))
+        cfg = setuptools.package_index.PyPIConfig()
+        cred = cfg.creds_by_repository['https://pypi.org']
+        assert cred.username == 'jaraco'
+        assert cred.password == 'pity%'
diff --git a/setuptools/tests/test_sandbox.py b/setuptools/tests/test_sandbox.py
new file mode 100644
index 0000000..d867542
--- /dev/null
+++ b/setuptools/tests/test_sandbox.py
@@ -0,0 +1,133 @@
+"""develop tests
+"""
+import os
+import types
+
+import pytest
+
+import pkg_resources
+import setuptools.sandbox
+
+
+class TestSandbox:
+    def test_devnull(self, tmpdir):
+        with setuptools.sandbox.DirectorySandbox(str(tmpdir)):
+            self._file_writer(os.devnull)
+
+    @staticmethod
+    def _file_writer(path):
+        def do_write():
+            with open(path, 'w') as f:
+                f.write('xxx')
+
+        return do_write
+
+    def test_setup_py_with_BOM(self):
+        """
+        It should be possible to execute a setup.py with a Byte Order Mark
+        """
+        target = pkg_resources.resource_filename(__name__,
+            'script-with-bom.py')
+        namespace = types.ModuleType('namespace')
+        setuptools.sandbox._execfile(target, vars(namespace))
+        assert namespace.result == 'passed'
+
+    def test_setup_py_with_CRLF(self, tmpdir):
+        setup_py = tmpdir / 'setup.py'
+        with setup_py.open('wb') as stream:
+            stream.write(b'"degenerate script"\r\n')
+        setuptools.sandbox._execfile(str(setup_py), globals())
+
+
+class TestExceptionSaver:
+    def test_exception_trapped(self):
+        with setuptools.sandbox.ExceptionSaver():
+            raise ValueError("details")
+
+    def test_exception_resumed(self):
+        with setuptools.sandbox.ExceptionSaver() as saved_exc:
+            raise ValueError("details")
+
+        with pytest.raises(ValueError) as caught:
+            saved_exc.resume()
+
+        assert isinstance(caught.value, ValueError)
+        assert str(caught.value) == 'details'
+
+    def test_exception_reconstructed(self):
+        orig_exc = ValueError("details")
+
+        with setuptools.sandbox.ExceptionSaver() as saved_exc:
+            raise orig_exc
+
+        with pytest.raises(ValueError) as caught:
+            saved_exc.resume()
+
+        assert isinstance(caught.value, ValueError)
+        assert caught.value is not orig_exc
+
+    def test_no_exception_passes_quietly(self):
+        with setuptools.sandbox.ExceptionSaver() as saved_exc:
+            pass
+
+        saved_exc.resume()
+
+    def test_unpickleable_exception(self):
+        class CantPickleThis(Exception):
+            "This Exception is unpickleable because it's not in globals"
+            def __repr__(self):
+                return 'CantPickleThis%r' % (self.args,)
+
+        with setuptools.sandbox.ExceptionSaver() as saved_exc:
+            raise CantPickleThis('detail')
+
+        with pytest.raises(setuptools.sandbox.UnpickleableException) as caught:
+            saved_exc.resume()
+
+        assert str(caught.value) == "CantPickleThis('detail',)"
+
+    def test_unpickleable_exception_when_hiding_setuptools(self):
+        """
+        As revealed in #440, an infinite recursion can occur if an unpickleable
+        exception while setuptools is hidden. Ensure this doesn't happen.
+        """
+
+        class ExceptionUnderTest(Exception):
+            """
+            An unpickleable exception (not in globals).
+            """
+
+        with pytest.raises(setuptools.sandbox.UnpickleableException) as caught:
+            with setuptools.sandbox.save_modules():
+                setuptools.sandbox.hide_setuptools()
+                raise ExceptionUnderTest()
+
+        msg, = caught.value.args
+        assert msg == 'ExceptionUnderTest()'
+
+    def test_sandbox_violation_raised_hiding_setuptools(self, tmpdir):
+        """
+        When in a sandbox with setuptools hidden, a SandboxViolation
+        should reflect a proper exception and not be wrapped in
+        an UnpickleableException.
+        """
+
+        def write_file():
+            "Trigger a SandboxViolation by writing outside the sandbox"
+            with open('/etc/foo', 'w'):
+                pass
+
+        with pytest.raises(setuptools.sandbox.SandboxViolation) as caught:
+            with setuptools.sandbox.save_modules():
+                setuptools.sandbox.hide_setuptools()
+                with setuptools.sandbox.DirectorySandbox(str(tmpdir)):
+                    write_file()
+
+        cmd, args, kwargs = caught.value.args
+        assert cmd == 'open'
+        assert args == ('/etc/foo', 'w')
+        assert kwargs == {}
+
+        msg = str(caught.value)
+        assert 'open' in msg
+        assert "('/etc/foo', 'w')" in msg
diff --git a/setuptools/tests/test_sdist.py b/setuptools/tests/test_sdist.py
new file mode 100644
index 0000000..02222da
--- /dev/null
+++ b/setuptools/tests/test_sdist.py
@@ -0,0 +1,427 @@
+# -*- coding: utf-8 -*-
+"""sdist tests"""
+
+import os
+import shutil
+import sys
+import tempfile
+import unicodedata
+import contextlib
+import io
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import map
+
+import pytest
+
+import pkg_resources
+from setuptools.command.sdist import sdist
+from setuptools.command.egg_info import manifest_maker
+from setuptools.dist import Distribution
+from setuptools.tests import fail_on_ascii
+from .text import Filenames
+
+py3_only = pytest.mark.xfail(six.PY2, reason="Test runs on Python 3 only")
+
+SETUP_ATTRS = {
+    'name': 'sdist_test',
+    'version': '0.0',
+    'packages': ['sdist_test'],
+    'package_data': {'sdist_test': ['*.txt']},
+    'data_files': [("data", [os.path.join("d", "e.dat")])],
+}
+
+SETUP_PY = """\
+from setuptools import setup
+
+setup(**%r)
+""" % SETUP_ATTRS
+
+
+@contextlib.contextmanager
+def quiet():
+    old_stdout, old_stderr = sys.stdout, sys.stderr
+    sys.stdout, sys.stderr = six.StringIO(), six.StringIO()
+    try:
+        yield
+    finally:
+        sys.stdout, sys.stderr = old_stdout, old_stderr
+
+
+# Convert to POSIX path
+def posix(path):
+    if six.PY3 and not isinstance(path, str):
+        return path.replace(os.sep.encode('ascii'), b'/')
+    else:
+        return path.replace(os.sep, '/')
+
+
+# HFS Plus uses decomposed UTF-8
+def decompose(path):
+    if isinstance(path, six.text_type):
+        return unicodedata.normalize('NFD', path)
+    try:
+        path = path.decode('utf-8')
+        path = unicodedata.normalize('NFD', path)
+        path = path.encode('utf-8')
+    except UnicodeError:
+        pass  # Not UTF-8
+    return path
+
+
+def read_all_bytes(filename):
+    with io.open(filename, 'rb') as fp:
+        return fp.read()
+
+
+def latin1_fail():
+    try:
+        desc, filename = tempfile.mkstemp(suffix=Filenames.latin_1)
+        os.close(desc)
+        os.remove(filename)
+    except Exception:
+        return True
+
+
+fail_on_latin1_encoded_filenames = pytest.mark.xfail(
+    latin1_fail(),
+    reason="System does not support latin-1 filenames",
+)
+
+
+class TestSdistTest:
+    def setup_method(self, method):
+        self.temp_dir = tempfile.mkdtemp()
+        f = open(os.path.join(self.temp_dir, 'setup.py'), 'w')
+        f.write(SETUP_PY)
+        f.close()
+
+        # Set up the rest of the test package
+        test_pkg = os.path.join(self.temp_dir, 'sdist_test')
+        os.mkdir(test_pkg)
+        data_folder = os.path.join(self.temp_dir, "d")
+        os.mkdir(data_folder)
+        # *.rst was not included in package_data, so c.rst should not be
+        # automatically added to the manifest when not under version control
+        for fname in ['__init__.py', 'a.txt', 'b.txt', 'c.rst',
+                      os.path.join(data_folder, "e.dat")]:
+            # Just touch the files; their contents are irrelevant
+            open(os.path.join(test_pkg, fname), 'w').close()
+
+        self.old_cwd = os.getcwd()
+        os.chdir(self.temp_dir)
+
+    def teardown_method(self, method):
+        os.chdir(self.old_cwd)
+        shutil.rmtree(self.temp_dir)
+
+    def test_package_data_in_sdist(self):
+        """Regression test for pull request #4: ensures that files listed in
+        package_data are included in the manifest even if they're not added to
+        version control.
+        """
+
+        dist = Distribution(SETUP_ATTRS)
+        dist.script_name = 'setup.py'
+        cmd = sdist(dist)
+        cmd.ensure_finalized()
+
+        with quiet():
+            cmd.run()
+
+        manifest = cmd.filelist.files
+        assert os.path.join('sdist_test', 'a.txt') in manifest
+        assert os.path.join('sdist_test', 'b.txt') in manifest
+        assert os.path.join('sdist_test', 'c.rst') not in manifest
+        assert os.path.join('d', 'e.dat') in manifest
+
+    def test_defaults_case_sensitivity(self):
+        """
+        Make sure default files (README.*, etc.) are added in a case-sensitive
+        way to avoid problems with packages built on Windows.
+        """
+
+        open(os.path.join(self.temp_dir, 'readme.rst'), 'w').close()
+        open(os.path.join(self.temp_dir, 'SETUP.cfg'), 'w').close()
+
+        dist = Distribution(SETUP_ATTRS)
+        # the extension deliberately capitalized for this test
+        # to make sure the actual filename (not capitalized) gets added
+        # to the manifest
+        dist.script_name = 'setup.PY'
+        cmd = sdist(dist)
+        cmd.ensure_finalized()
+
+        with quiet():
+            cmd.run()
+
+        # lowercase all names so we can test in a
+        # case-insensitive way to make sure the files
+        # are not included.
+        manifest = list(map(lambda x: x.lower(), cmd.filelist.files))
+        assert 'readme.rst' not in manifest, manifest
+        assert 'setup.py' not in manifest, manifest
+        assert 'setup.cfg' not in manifest, manifest
+
+    @fail_on_ascii
+    def test_manifest_is_written_with_utf8_encoding(self):
+        # Test for #303.
+        dist = Distribution(SETUP_ATTRS)
+        dist.script_name = 'setup.py'
+        mm = manifest_maker(dist)
+        mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
+        os.mkdir('sdist_test.egg-info')
+
+        # UTF-8 filename
+        filename = os.path.join('sdist_test', 'smörbröd.py')
+
+        # Must create the file or it will get stripped.
+        open(filename, 'w').close()
+
+        # Add UTF-8 filename and write manifest
+        with quiet():
+            mm.run()
+            mm.filelist.append(filename)
+            mm.write_manifest()
+
+        contents = read_all_bytes(mm.manifest)
+
+        # The manifest should be UTF-8 encoded
+        u_contents = contents.decode('UTF-8')
+
+        # The manifest should contain the UTF-8 filename
+        if six.PY2:
+            fs_enc = sys.getfilesystemencoding()
+            filename = filename.decode(fs_enc)
+
+        assert posix(filename) in u_contents
+
+    @py3_only
+    @fail_on_ascii
+    def test_write_manifest_allows_utf8_filenames(self):
+        # Test for #303.
+        dist = Distribution(SETUP_ATTRS)
+        dist.script_name = 'setup.py'
+        mm = manifest_maker(dist)
+        mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
+        os.mkdir('sdist_test.egg-info')
+
+        filename = os.path.join(b'sdist_test', Filenames.utf_8)
+
+        # Must touch the file or risk removal
+        open(filename, "w").close()
+
+        # Add filename and write manifest
+        with quiet():
+            mm.run()
+            u_filename = filename.decode('utf-8')
+            mm.filelist.files.append(u_filename)
+            # Re-write manifest
+            mm.write_manifest()
+
+        contents = read_all_bytes(mm.manifest)
+
+        # The manifest should be UTF-8 encoded
+        contents.decode('UTF-8')
+
+        # The manifest should contain the UTF-8 filename
+        assert posix(filename) in contents
+
+        # The filelist should have been updated as well
+        assert u_filename in mm.filelist.files
+
+    @py3_only
+    def test_write_manifest_skips_non_utf8_filenames(self):
+        """
+        Files that cannot be encoded to UTF-8 (specifically, those that
+        weren't originally successfully decoded and have surrogate
+        escapes) should be omitted from the manifest.
+        See https://bitbucket.org/tarek/distribute/issue/303 for history.
+        """
+        dist = Distribution(SETUP_ATTRS)
+        dist.script_name = 'setup.py'
+        mm = manifest_maker(dist)
+        mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
+        os.mkdir('sdist_test.egg-info')
+
+        # Latin-1 filename
+        filename = os.path.join(b'sdist_test', Filenames.latin_1)
+
+        # Add filename with surrogates and write manifest
+        with quiet():
+            mm.run()
+            u_filename = filename.decode('utf-8', 'surrogateescape')
+            mm.filelist.append(u_filename)
+            # Re-write manifest
+            mm.write_manifest()
+
+        contents = read_all_bytes(mm.manifest)
+
+        # The manifest should be UTF-8 encoded
+        contents.decode('UTF-8')
+
+        # The Latin-1 filename should have been skipped
+        assert posix(filename) not in contents
+
+        # The filelist should have been updated as well
+        assert u_filename not in mm.filelist.files
+
+    @fail_on_ascii
+    def test_manifest_is_read_with_utf8_encoding(self):
+        # Test for #303.
+        dist = Distribution(SETUP_ATTRS)
+        dist.script_name = 'setup.py'
+        cmd = sdist(dist)
+        cmd.ensure_finalized()
+
+        # Create manifest
+        with quiet():
+            cmd.run()
+
+        # Add UTF-8 filename to manifest
+        filename = os.path.join(b'sdist_test', Filenames.utf_8)
+        cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
+        manifest = open(cmd.manifest, 'ab')
+        manifest.write(b'\n' + filename)
+        manifest.close()
+
+        # The file must exist to be included in the filelist
+        open(filename, 'w').close()
+
+        # Re-read manifest
+        cmd.filelist.files = []
+        with quiet():
+            cmd.read_manifest()
+
+        # The filelist should contain the UTF-8 filename
+        if six.PY3:
+            filename = filename.decode('utf-8')
+        assert filename in cmd.filelist.files
+
+    @py3_only
+    @fail_on_latin1_encoded_filenames
+    def test_read_manifest_skips_non_utf8_filenames(self):
+        # Test for #303.
+        dist = Distribution(SETUP_ATTRS)
+        dist.script_name = 'setup.py'
+        cmd = sdist(dist)
+        cmd.ensure_finalized()
+
+        # Create manifest
+        with quiet():
+            cmd.run()
+
+        # Add Latin-1 filename to manifest
+        filename = os.path.join(b'sdist_test', Filenames.latin_1)
+        cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
+        manifest = open(cmd.manifest, 'ab')
+        manifest.write(b'\n' + filename)
+        manifest.close()
+
+        # The file must exist to be included in the filelist
+        open(filename, 'w').close()
+
+        # Re-read manifest
+        cmd.filelist.files = []
+        with quiet():
+            cmd.read_manifest()
+
+        # The Latin-1 filename should have been skipped
+        filename = filename.decode('latin-1')
+        assert filename not in cmd.filelist.files
+
+    @fail_on_ascii
+    @fail_on_latin1_encoded_filenames
+    def test_sdist_with_utf8_encoded_filename(self):
+        # Test for #303.
+        dist = Distribution(SETUP_ATTRS)
+        dist.script_name = 'setup.py'
+        cmd = sdist(dist)
+        cmd.ensure_finalized()
+
+        filename = os.path.join(b'sdist_test', Filenames.utf_8)
+        open(filename, 'w').close()
+
+        with quiet():
+            cmd.run()
+
+        if sys.platform == 'darwin':
+            filename = decompose(filename)
+
+        if six.PY3:
+            fs_enc = sys.getfilesystemencoding()
+
+            if sys.platform == 'win32':
+                if fs_enc == 'cp1252':
+                    # Python 3 mangles the UTF-8 filename
+                    filename = filename.decode('cp1252')
+                    assert filename in cmd.filelist.files
+                else:
+                    filename = filename.decode('mbcs')
+                    assert filename in cmd.filelist.files
+            else:
+                filename = filename.decode('utf-8')
+                assert filename in cmd.filelist.files
+        else:
+            assert filename in cmd.filelist.files
+
+    @fail_on_latin1_encoded_filenames
+    def test_sdist_with_latin1_encoded_filename(self):
+        # Test for #303.
+        dist = Distribution(SETUP_ATTRS)
+        dist.script_name = 'setup.py'
+        cmd = sdist(dist)
+        cmd.ensure_finalized()
+
+        # Latin-1 filename
+        filename = os.path.join(b'sdist_test', Filenames.latin_1)
+        open(filename, 'w').close()
+        assert os.path.isfile(filename)
+
+        with quiet():
+            cmd.run()
+
+        if six.PY3:
+            # not all windows systems have a default FS encoding of cp1252
+            if sys.platform == 'win32':
+                # Latin-1 is similar to Windows-1252 however
+                # on mbcs filesys it is not in latin-1 encoding
+                fs_enc = sys.getfilesystemencoding()
+                if fs_enc != 'mbcs':
+                    fs_enc = 'latin-1'
+                filename = filename.decode(fs_enc)
+
+                assert filename in cmd.filelist.files
+            else:
+                # The Latin-1 filename should have been skipped
+                filename = filename.decode('latin-1')
+                assert filename not in cmd.filelist.files
+        else:
+            # Under Python 2 there seems to be no decoded string in the
+            # filelist.  However, due to decode and encoding of the
+            # file name to get utf-8 Manifest the latin1 maybe excluded
+            try:
+                # fs_enc should match how one expects the decoding to
+                # be performed for the manifest output.
+                fs_enc = sys.getfilesystemencoding()
+                filename.decode(fs_enc)
+                assert filename in cmd.filelist.files
+            except UnicodeDecodeError:
+                assert filename not in cmd.filelist.files
+
+
+def test_default_revctrl():
+    """
+    When _default_revctrl was removed from the `setuptools.command.sdist`
+    module in 10.0, it broke some systems which keep an old install of
+    setuptools (Distribute) around. Those old versions require that the
+    setuptools package continue to implement that interface, so this
+    function provides that interface, stubbed. See #320 for details.
+
+    This interface must be maintained until Ubuntu 12.04 is no longer
+    supported (by Setuptools).
+    """
+    ep_def = 'svn_cvs = setuptools.command.sdist:_default_revctrl'
+    ep = pkg_resources.EntryPoint.parse(ep_def)
+    res = ep.resolve()
+    assert hasattr(res, '__iter__')
diff --git a/setuptools/tests/test_setuptools.py b/setuptools/tests/test_setuptools.py
new file mode 100644
index 0000000..26e37a6
--- /dev/null
+++ b/setuptools/tests/test_setuptools.py
@@ -0,0 +1,368 @@
+"""Tests for the 'setuptools' package"""
+
+import sys
+import os
+import distutils.core
+import distutils.cmd
+from distutils.errors import DistutilsOptionError, DistutilsPlatformError
+from distutils.errors import DistutilsSetupError
+from distutils.core import Extension
+from distutils.version import LooseVersion
+
+import pytest
+
+import setuptools
+import setuptools.dist
+import setuptools.depends as dep
+from setuptools import Feature
+from setuptools.depends import Require
+from setuptools.extern import six
+
+
+def makeSetup(**args):
+    """Return distribution from 'setup(**args)', without executing commands"""
+
+    distutils.core._setup_stop_after = "commandline"
+
+    # Don't let system command line leak into tests!
+    args.setdefault('script_args', ['install'])
+
+    try:
+        return setuptools.setup(**args)
+    finally:
+        distutils.core._setup_stop_after = None
+
+
+needs_bytecode = pytest.mark.skipif(
+    not hasattr(dep, 'get_module_constant'),
+    reason="bytecode support not available",
+)
+
+
+class TestDepends:
+    def testExtractConst(self):
+        if not hasattr(dep, 'extract_constant'):
+            # skip on non-bytecode platforms
+            return
+
+        def f1():
+            global x, y, z
+            x = "test"
+            y = z
+
+        fc = six.get_function_code(f1)
+
+        # unrecognized name
+        assert dep.extract_constant(fc, 'q', -1) is None
+
+        # constant assigned
+        assert dep.extract_constant(fc, 'x', -1) == "test"
+
+        # expression assigned
+        assert dep.extract_constant(fc, 'y', -1) == -1
+
+        # recognized name, not assigned
+        assert dep.extract_constant(fc, 'z', -1) is None
+
+    def testFindModule(self):
+        with pytest.raises(ImportError):
+            dep.find_module('no-such.-thing')
+        with pytest.raises(ImportError):
+            dep.find_module('setuptools.non-existent')
+        f, p, i = dep.find_module('setuptools.tests')
+        f.close()
+
+    @needs_bytecode
+    def testModuleExtract(self):
+        from json import __version__
+        assert dep.get_module_constant('json', '__version__') == __version__
+        assert dep.get_module_constant('sys', 'version') == sys.version
+        assert dep.get_module_constant('setuptools.tests.test_setuptools', '__doc__') == __doc__
+
+    @needs_bytecode
+    def testRequire(self):
+        req = Require('Json', '1.0.3', 'json')
+
+        assert req.name == 'Json'
+        assert req.module == 'json'
+        assert req.requested_version == '1.0.3'
+        assert req.attribute == '__version__'
+        assert req.full_name() == 'Json-1.0.3'
+
+        from json import __version__
+        assert req.get_version() == __version__
+        assert req.version_ok('1.0.9')
+        assert not req.version_ok('0.9.1')
+        assert not req.version_ok('unknown')
+
+        assert req.is_present()
+        assert req.is_current()
+
+        req = Require('Json 3000', '03000', 'json', format=LooseVersion)
+        assert req.is_present()
+        assert not req.is_current()
+        assert not req.version_ok('unknown')
+
+        req = Require('Do-what-I-mean', '1.0', 'd-w-i-m')
+        assert not req.is_present()
+        assert not req.is_current()
+
+        req = Require('Tests', None, 'tests', homepage="http://example.com")
+        assert req.format is None
+        assert req.attribute is None
+        assert req.requested_version is None
+        assert req.full_name() == 'Tests'
+        assert req.homepage == 'http://example.com'
+
+        from setuptools.tests import __path__
+        paths = [os.path.dirname(p) for p in __path__]
+        assert req.is_present(paths)
+        assert req.is_current(paths)
+
+
+class TestDistro:
+    def setup_method(self, method):
+        self.e1 = Extension('bar.ext', ['bar.c'])
+        self.e2 = Extension('c.y', ['y.c'])
+
+        self.dist = makeSetup(
+            packages=['a', 'a.b', 'a.b.c', 'b', 'c'],
+            py_modules=['b.d', 'x'],
+            ext_modules=(self.e1, self.e2),
+            package_dir={},
+        )
+
+    def testDistroType(self):
+        assert isinstance(self.dist, setuptools.dist.Distribution)
+
+    def testExcludePackage(self):
+        self.dist.exclude_package('a')
+        assert self.dist.packages == ['b', 'c']
+
+        self.dist.exclude_package('b')
+        assert self.dist.packages == ['c']
+        assert self.dist.py_modules == ['x']
+        assert self.dist.ext_modules == [self.e1, self.e2]
+
+        self.dist.exclude_package('c')
+        assert self.dist.packages == []
+        assert self.dist.py_modules == ['x']
+        assert self.dist.ext_modules == [self.e1]
+
+        # test removals from unspecified options
+        makeSetup().exclude_package('x')
+
+    def testIncludeExclude(self):
+        # remove an extension
+        self.dist.exclude(ext_modules=[self.e1])
+        assert self.dist.ext_modules == [self.e2]
+
+        # add it back in
+        self.dist.include(ext_modules=[self.e1])
+        assert self.dist.ext_modules == [self.e2, self.e1]
+
+        # should not add duplicate
+        self.dist.include(ext_modules=[self.e1])
+        assert self.dist.ext_modules == [self.e2, self.e1]
+
+    def testExcludePackages(self):
+        self.dist.exclude(packages=['c', 'b', 'a'])
+        assert self.dist.packages == []
+        assert self.dist.py_modules == ['x']
+        assert self.dist.ext_modules == [self.e1]
+
+    def testEmpty(self):
+        dist = makeSetup()
+        dist.include(packages=['a'], py_modules=['b'], ext_modules=[self.e2])
+        dist = makeSetup()
+        dist.exclude(packages=['a'], py_modules=['b'], ext_modules=[self.e2])
+
+    def testContents(self):
+        assert self.dist.has_contents_for('a')
+        self.dist.exclude_package('a')
+        assert not self.dist.has_contents_for('a')
+
+        assert self.dist.has_contents_for('b')
+        self.dist.exclude_package('b')
+        assert not self.dist.has_contents_for('b')
+
+        assert self.dist.has_contents_for('c')
+        self.dist.exclude_package('c')
+        assert not self.dist.has_contents_for('c')
+
+    def testInvalidIncludeExclude(self):
+        with pytest.raises(DistutilsSetupError):
+            self.dist.include(nonexistent_option='x')
+        with pytest.raises(DistutilsSetupError):
+            self.dist.exclude(nonexistent_option='x')
+        with pytest.raises(DistutilsSetupError):
+            self.dist.include(packages={'x': 'y'})
+        with pytest.raises(DistutilsSetupError):
+            self.dist.exclude(packages={'x': 'y'})
+        with pytest.raises(DistutilsSetupError):
+            self.dist.include(ext_modules={'x': 'y'})
+        with pytest.raises(DistutilsSetupError):
+            self.dist.exclude(ext_modules={'x': 'y'})
+
+        with pytest.raises(DistutilsSetupError):
+            self.dist.include(package_dir=['q'])
+        with pytest.raises(DistutilsSetupError):
+            self.dist.exclude(package_dir=['q'])
+
+
+class TestFeatures:
+    def setup_method(self, method):
+        self.req = Require('Distutils', '1.0.3', 'distutils')
+        self.dist = makeSetup(
+            features={
+                'foo': Feature("foo", standard=True, require_features=['baz', self.req]),
+                'bar': Feature("bar", standard=True, packages=['pkg.bar'],
+                               py_modules=['bar_et'], remove=['bar.ext'],
+                               ),
+                'baz': Feature(
+                        "baz", optional=False, packages=['pkg.baz'],
+                        scripts=['scripts/baz_it'],
+                        libraries=[('libfoo', 'foo/foofoo.c')]
+                       ),
+                'dwim': Feature("DWIM", available=False, remove='bazish'),
+            },
+            script_args=['--without-bar', 'install'],
+            packages=['pkg.bar', 'pkg.foo'],
+            py_modules=['bar_et', 'bazish'],
+            ext_modules=[Extension('bar.ext', ['bar.c'])]
+        )
+
+    def testDefaults(self):
+        assert not Feature(
+            "test", standard=True, remove='x', available=False
+        ).include_by_default()
+        assert Feature("test", standard=True, remove='x').include_by_default()
+        # Feature must have either kwargs, removes, or require_features
+        with pytest.raises(DistutilsSetupError):
+            Feature("test")
+
+    def testAvailability(self):
+        with pytest.raises(DistutilsPlatformError):
+            self.dist.features['dwim'].include_in(self.dist)
+
+    def testFeatureOptions(self):
+        dist = self.dist
+        assert (
+            ('with-dwim', None, 'include DWIM') in dist.feature_options
+        )
+        assert (
+            ('without-dwim', None, 'exclude DWIM (default)') in dist.feature_options
+        )
+        assert (
+            ('with-bar', None, 'include bar (default)') in dist.feature_options
+        )
+        assert (
+            ('without-bar', None, 'exclude bar') in dist.feature_options
+        )
+        assert dist.feature_negopt['without-foo'] == 'with-foo'
+        assert dist.feature_negopt['without-bar'] == 'with-bar'
+        assert dist.feature_negopt['without-dwim'] == 'with-dwim'
+        assert ('without-baz' not in dist.feature_negopt)
+
+    def testUseFeatures(self):
+        dist = self.dist
+        assert dist.with_foo == 1
+        assert dist.with_bar == 0
+        assert dist.with_baz == 1
+        assert ('bar_et' not in dist.py_modules)
+        assert ('pkg.bar' not in dist.packages)
+        assert ('pkg.baz' in dist.packages)
+        assert ('scripts/baz_it' in dist.scripts)
+        assert (('libfoo', 'foo/foofoo.c') in dist.libraries)
+        assert dist.ext_modules == []
+        assert dist.require_features == [self.req]
+
+        # If we ask for bar, it should fail because we explicitly disabled
+        # it on the command line
+        with pytest.raises(DistutilsOptionError):
+            dist.include_feature('bar')
+
+    def testFeatureWithInvalidRemove(self):
+        with pytest.raises(SystemExit):
+            makeSetup(features={'x': Feature('x', remove='y')})
+
+
+class TestCommandTests:
+    def testTestIsCommand(self):
+        test_cmd = makeSetup().get_command_obj('test')
+        assert (isinstance(test_cmd, distutils.cmd.Command))
+
+    def testLongOptSuiteWNoDefault(self):
+        ts1 = makeSetup(script_args=['test', '--test-suite=foo.tests.suite'])
+        ts1 = ts1.get_command_obj('test')
+        ts1.ensure_finalized()
+        assert ts1.test_suite == 'foo.tests.suite'
+
+    def testDefaultSuite(self):
+        ts2 = makeSetup(test_suite='bar.tests.suite').get_command_obj('test')
+        ts2.ensure_finalized()
+        assert ts2.test_suite == 'bar.tests.suite'
+
+    def testDefaultWModuleOnCmdLine(self):
+        ts3 = makeSetup(
+            test_suite='bar.tests',
+            script_args=['test', '-m', 'foo.tests']
+        ).get_command_obj('test')
+        ts3.ensure_finalized()
+        assert ts3.test_module == 'foo.tests'
+        assert ts3.test_suite == 'foo.tests.test_suite'
+
+    def testConflictingOptions(self):
+        ts4 = makeSetup(
+            script_args=['test', '-m', 'bar.tests', '-s', 'foo.tests.suite']
+        ).get_command_obj('test')
+        with pytest.raises(DistutilsOptionError):
+            ts4.ensure_finalized()
+
+    def testNoSuite(self):
+        ts5 = makeSetup().get_command_obj('test')
+        ts5.ensure_finalized()
+        assert ts5.test_suite is None
+
+
+@pytest.fixture
+def example_source(tmpdir):
+    tmpdir.mkdir('foo')
+    (tmpdir / 'foo/bar.py').write('')
+    (tmpdir / 'readme.txt').write('')
+    return tmpdir
+
+
+def test_findall(example_source):
+    found = list(setuptools.findall(str(example_source)))
+    expected = ['readme.txt', 'foo/bar.py']
+    expected = [example_source.join(fn) for fn in expected]
+    assert found == expected
+
+
+def test_findall_curdir(example_source):
+    with example_source.as_cwd():
+        found = list(setuptools.findall())
+    expected = ['readme.txt', os.path.join('foo', 'bar.py')]
+    assert found == expected
+
+
+@pytest.fixture
+def can_symlink(tmpdir):
+    """
+    Skip if cannot create a symbolic link
+    """
+    link_fn = 'link'
+    target_fn = 'target'
+    try:
+        os.symlink(target_fn, link_fn)
+    except (OSError, NotImplementedError, AttributeError):
+        pytest.skip("Cannot create symbolic links")
+    os.remove(link_fn)
+
+
+def test_findall_missing_symlink(tmpdir, can_symlink):
+    with tmpdir.as_cwd():
+        os.symlink('foo', 'bar')
+        found = list(setuptools.findall())
+        assert found == []
diff --git a/setuptools/tests/test_test.py b/setuptools/tests/test_test.py
new file mode 100644
index 0000000..960527b
--- /dev/null
+++ b/setuptools/tests/test_test.py
@@ -0,0 +1,131 @@
+# -*- coding: UTF-8 -*-
+
+from __future__ import unicode_literals
+
+from distutils import log
+import os
+import sys
+
+import pytest
+
+from setuptools.command.test import test
+from setuptools.dist import Distribution
+
+from .textwrap import DALS
+from . import contexts
+
+SETUP_PY = DALS("""
+    from setuptools import setup
+
+    setup(name='foo',
+        packages=['name', 'name.space', 'name.space.tests'],
+        namespace_packages=['name'],
+        test_suite='name.space.tests.test_suite',
+    )
+    """)
+
+NS_INIT = DALS("""
+    # -*- coding: Latin-1 -*-
+    # Söme Arbiträry Ünicode to test Distribute Issüé 310
+    try:
+        __import__('pkg_resources').declare_namespace(__name__)
+    except ImportError:
+        from pkgutil import extend_path
+        __path__ = extend_path(__path__, __name__)
+    """)
+
+TEST_PY = DALS("""
+    import unittest
+
+    class TestTest(unittest.TestCase):
+        def test_test(self):
+            print "Foo" # Should fail under Python 3 unless 2to3 is used
+
+    test_suite = unittest.makeSuite(TestTest)
+    """)
+
+
+@pytest.fixture
+def sample_test(tmpdir_cwd):
+    os.makedirs('name/space/tests')
+
+    # setup.py
+    with open('setup.py', 'wt') as f:
+        f.write(SETUP_PY)
+
+    # name/__init__.py
+    with open('name/__init__.py', 'wb') as f:
+        f.write(NS_INIT.encode('Latin-1'))
+
+    # name/space/__init__.py
+    with open('name/space/__init__.py', 'wt') as f:
+        f.write('#empty\n')
+
+    # name/space/tests/__init__.py
+    with open('name/space/tests/__init__.py', 'wt') as f:
+        f.write(TEST_PY)
+
+
+@pytest.fixture
+def quiet_log():
+    # Running some of the other tests will automatically
+    # change the log level to info, messing our output.
+    log.set_verbosity(0)
+
+
+@pytest.mark.usefixtures('sample_test', 'quiet_log')
+def test_test(capfd):
+    params = dict(
+        name='foo',
+        packages=['name', 'name.space', 'name.space.tests'],
+        namespace_packages=['name'],
+        test_suite='name.space.tests.test_suite',
+        use_2to3=True,
+    )
+    dist = Distribution(params)
+    dist.script_name = 'setup.py'
+    cmd = test(dist)
+    cmd.ensure_finalized()
+    # The test runner calls sys.exit
+    with contexts.suppress_exceptions(SystemExit):
+        cmd.run()
+    out, err = capfd.readouterr()
+    assert out == 'Foo\n'
+
+
+@pytest.mark.xfail(
+    sys.version_info < (2, 7),
+    reason="No discover support for unittest on Python 2.6",
+)
+@pytest.mark.usefixtures('tmpdir_cwd', 'quiet_log')
+def test_tests_are_run_once(capfd):
+    params = dict(
+        name='foo',
+        packages=['dummy'],
+    )
+    with open('setup.py', 'wt') as f:
+        f.write('from setuptools import setup; setup(\n')
+        for k, v in sorted(params.items()):
+            f.write('    %s=%r,\n' % (k, v))
+        f.write(')\n')
+    os.makedirs('dummy')
+    with open('dummy/__init__.py', 'wt'):
+        pass
+    with open('dummy/test_dummy.py', 'wt') as f:
+        f.write(DALS(
+            """
+            from __future__ import print_function
+            import unittest
+            class TestTest(unittest.TestCase):
+                def test_test(self):
+                    print('Foo')
+             """))
+    dist = Distribution(params)
+    dist.script_name = 'setup.py'
+    cmd = test(dist)
+    cmd.ensure_finalized()
+    # The test runner calls sys.exit
+    with contexts.suppress_exceptions(SystemExit):
+        cmd.run()
+    out, err = capfd.readouterr()
+    assert out == 'Foo\n'
diff --git a/setuptools/tests/test_unicode_utils.py b/setuptools/tests/test_unicode_utils.py
new file mode 100644
index 0000000..a24a9bd
--- /dev/null
+++ b/setuptools/tests/test_unicode_utils.py
@@ -0,0 +1,10 @@
+from setuptools import unicode_utils
+
+
+def test_filesys_decode_fs_encoding_is_None(monkeypatch):
+    """
+    Test filesys_decode does not raise TypeError when
+    getfilesystemencoding returns None.
+    """
+    monkeypatch.setattr('sys.getfilesystemencoding', lambda: None)
+    unicode_utils.filesys_decode(b'test')
diff --git a/setuptools/tests/test_upload_docs.py b/setuptools/tests/test_upload_docs.py
new file mode 100644
index 0000000..a26e32a
--- /dev/null
+++ b/setuptools/tests/test_upload_docs.py
@@ -0,0 +1,71 @@
+import os
+import zipfile
+import contextlib
+
+import pytest
+
+from setuptools.command.upload_docs import upload_docs
+from setuptools.dist import Distribution
+
+from .textwrap import DALS
+from . import contexts
+
+SETUP_PY = DALS(
+    """
+    from setuptools import setup
+
+    setup(name='foo')
+    """)
+
+
+@pytest.fixture
+def sample_project(tmpdir_cwd):
+    # setup.py
+    with open('setup.py', 'wt') as f:
+        f.write(SETUP_PY)
+
+    os.mkdir('build')
+
+    # A test document.
+    with open('build/index.html', 'w') as f:
+        f.write("Hello world.")
+
+    # An empty folder.
+    os.mkdir('build/empty')
+
+
+@pytest.mark.usefixtures('sample_project')
+@pytest.mark.usefixtures('user_override')
+class TestUploadDocsTest:
+    def test_create_zipfile(self):
+        """
+        Ensure zipfile creation handles common cases, including a folder
+        containing an empty folder.
+        """
+
+        dist = Distribution()
+
+        cmd = upload_docs(dist)
+        cmd.target_dir = cmd.upload_dir = 'build'
+        with contexts.tempdir() as tmp_dir:
+            tmp_file = os.path.join(tmp_dir, 'foo.zip')
+            zip_file = cmd.create_zipfile(tmp_file)
+
+            assert zipfile.is_zipfile(tmp_file)
+
+            with contextlib.closing(zipfile.ZipFile(tmp_file)) as zip_file:
+                assert zip_file.namelist() == ['index.html']
+
+    def test_build_multipart(self):
+        data = dict(
+            a="foo",
+            b="bar",
+            file=('file.txt', b'content'),
+        )
+        body, content_type = upload_docs._build_multipart(data)
+        assert 'form-data' in content_type
+        assert "b'" not in content_type
+        assert 'b"' not in content_type
+        assert isinstance(body, bytes)
+        assert b'foo' in body
+        assert b'content' in body
diff --git a/setuptools/tests/test_virtualenv.py b/setuptools/tests/test_virtualenv.py
new file mode 100644
index 0000000..b66a311
--- /dev/null
+++ b/setuptools/tests/test_virtualenv.py
@@ -0,0 +1,139 @@
+import glob
+import os
+import sys
+
+import pytest
+from pytest import yield_fixture
+from pytest_fixture_config import yield_requires_config
+
+import pytest_virtualenv
+
+from .textwrap import DALS
+from .test_easy_install import make_nspkg_sdist
+
+
+@pytest.fixture(autouse=True)
+def pytest_virtualenv_works(virtualenv):
+    """
+    pytest_virtualenv may not work. if it doesn't, skip these
+    tests. See #1284.
+    """
+    venv_prefix = virtualenv.run(
+        'python -c "import sys; print(sys.prefix)"',
+        capture=True,
+    ).strip()
+    if venv_prefix == sys.prefix:
+        pytest.skip("virtualenv is broken (see pypa/setuptools#1284)")
+
+
+@yield_requires_config(pytest_virtualenv.CONFIG, ['virtualenv_executable'])
+@yield_fixture(scope='function')
+def bare_virtualenv():
+    """ Bare virtualenv (no pip/setuptools/wheel).
+    """
+    with pytest_virtualenv.VirtualEnv(args=(
+        '--no-wheel',
+        '--no-pip',
+        '--no-setuptools',
+    )) as venv:
+        yield venv
+
+
+SOURCE_DIR = os.path.join(os.path.dirname(__file__), '../..')
+
+
+def test_clean_env_install(bare_virtualenv):
+    """
+    Check setuptools can be installed in a clean environment.
+    """
+    bare_virtualenv.run(' && '.join((
+        'cd {source}',
+        'python setup.py install',
+    )).format(source=SOURCE_DIR))
+
+
+def test_pip_upgrade_from_source(virtualenv):
+    """
+    Check pip can upgrade setuptools from source.
+    """
+    dist_dir = virtualenv.workspace
+    if sys.version_info < (2, 7):
+        # Python 2.6 support was dropped in wheel 0.30.0.
+        virtualenv.run('pip install -U "wheel<0.30.0"')
+    # Generate source distribution / wheel.
+    virtualenv.run(' && '.join((
+        'cd {source}',
+        'python setup.py -q sdist -d {dist}',
+        'python setup.py -q bdist_wheel -d {dist}',
+    )).format(source=SOURCE_DIR, dist=dist_dir))
+    sdist = glob.glob(os.path.join(dist_dir, '*.zip'))[0]
+    wheel = glob.glob(os.path.join(dist_dir, '*.whl'))[0]
+    # Then update from wheel.
+    virtualenv.run('pip install ' + wheel)
+    # And finally try to upgrade from source.
+    virtualenv.run('pip install --no-cache-dir --upgrade ' + sdist)
+
+
+def test_test_command_install_requirements(bare_virtualenv, tmpdir):
+    """
+    Check the test command will install all required dependencies.
+    """
+    bare_virtualenv.run(' && '.join((
+        'cd {source}',
+        'python setup.py develop',
+    )).format(source=SOURCE_DIR))
+
+    def sdist(distname, version):
+        dist_path = tmpdir.join('%s-%s.tar.gz' % (distname, version))
+        make_nspkg_sdist(str(dist_path), distname, version)
+        return dist_path
+    dependency_links = [
+        str(dist_path)
+        for dist_path in (
+            sdist('foobar', '2.4'),
+            sdist('bits', '4.2'),
+            sdist('bobs', '6.0'),
+            sdist('pieces', '0.6'),
+        )
+    ]
+    with tmpdir.join('setup.py').open('w') as fp:
+        fp.write(DALS(
+            '''
+            from setuptools import setup
+
+            setup(
+                dependency_links={dependency_links!r},
+                install_requires=[
+                    'barbazquux1; sys_platform in ""',
+                    'foobar==2.4',
+                ],
+                setup_requires='bits==4.2',
+                tests_require="""
+                    bobs==6.0
+                """,
+                extras_require={{
+                    'test': ['barbazquux2'],
+                    ':"" in sys_platform': 'pieces==0.6',
+                    ':python_version > "1"': """
+                        pieces
+                        foobar
+                    """,
+                }}
+            )
+            '''.format(dependency_links=dependency_links)))
+    with tmpdir.join('test.py').open('w') as fp:
+        fp.write(DALS(
+            '''
+            import foobar
+            import bits
+            import bobs
+            import pieces
+
+            open('success', 'w').close()
+            '''))
+    # Run test command for test package.
+    bare_virtualenv.run(' && '.join((
+        'cd {tmpdir}',
+        'python setup.py test -s test',
+    )).format(tmpdir=tmpdir))
+    assert tmpdir.join('success').check()
diff --git a/setuptools/tests/test_wheel.py b/setuptools/tests/test_wheel.py
new file mode 100644
index 0000000..150ac4c
--- /dev/null
+++ b/setuptools/tests/test_wheel.py
@@ -0,0 +1,508 @@
+# -*- coding: utf-8 -*-
+
+"""wheel tests
+"""
+
+from distutils.sysconfig import get_config_var
+from distutils.util import get_platform
+import contextlib
+import glob
+import inspect
+import os
+import subprocess
+import sys
+
+import pytest
+
+from pkg_resources import Distribution, PathMetadata, PY_MAJOR
+from setuptools.wheel import Wheel
+
+from .contexts import tempdir
+from .files import build_files
+from .textwrap import DALS
+
+
+WHEEL_INFO_TESTS = (
+    ('invalid.whl', ValueError),
+    ('simplewheel-2.0-1-py2.py3-none-any.whl', {
+        'project_name': 'simplewheel',
+        'version': '2.0',
+        'build': '1',
+        'py_version': 'py2.py3',
+        'abi': 'none',
+        'platform': 'any',
+    }),
+    ('simple.dist-0.1-py2.py3-none-any.whl', {
+        'project_name': 'simple.dist',
+        'version': '0.1',
+        'build': None,
+        'py_version': 'py2.py3',
+        'abi': 'none',
+        'platform': 'any',
+    }),
+    ('example_pkg_a-1-py3-none-any.whl', {
+        'project_name': 'example_pkg_a',
+        'version': '1',
+        'build': None,
+        'py_version': 'py3',
+        'abi': 'none',
+        'platform': 'any',
+    }),
+    ('PyQt5-5.9-5.9.1-cp35.cp36.cp37-abi3-manylinux1_x86_64.whl', {
+        'project_name': 'PyQt5',
+        'version': '5.9',
+        'build': '5.9.1',
+        'py_version': 'cp35.cp36.cp37',
+        'abi': 'abi3',
+        'platform': 'manylinux1_x86_64',
+    }),
+)
+
+@pytest.mark.parametrize(
+    ('filename', 'info'), WHEEL_INFO_TESTS,
+    ids=[t[0] for t in WHEEL_INFO_TESTS]
+)
+def test_wheel_info(filename, info):
+    if inspect.isclass(info):
+        with pytest.raises(info):
+            Wheel(filename)
+        return
+    w = Wheel(filename)
+    assert {k: getattr(w, k) for k in info.keys()} == info
+
+
+@contextlib.contextmanager
+def build_wheel(extra_file_defs=None, **kwargs):
+    file_defs = {
+        'setup.py': (DALS(
+            '''
+            # -*- coding: utf-8 -*-
+            from setuptools import setup
+            import setuptools
+            setup(**%r)
+            '''
+        ) % kwargs).encode('utf-8'),
+    }
+    if extra_file_defs:
+        file_defs.update(extra_file_defs)
+    with tempdir() as source_dir:
+        build_files(file_defs, source_dir)
+        subprocess.check_call((sys.executable, 'setup.py',
+                               '-q', 'bdist_wheel'), cwd=source_dir)
+        yield glob.glob(os.path.join(source_dir, 'dist', '*.whl'))[0]
+
+
+def tree_set(root):
+    contents = set()
+    for dirpath, dirnames, filenames in os.walk(root):
+        for filename in filenames:
+            contents.add(os.path.join(os.path.relpath(dirpath, root),
+                                      filename))
+    return contents
+
+
+def flatten_tree(tree):
+    """Flatten nested dicts and lists into a full list of paths"""
+    output = set()
+    for node, contents in tree.items():
+        if isinstance(contents, dict):
+            contents = flatten_tree(contents)
+
+        for elem in contents:
+            if isinstance(elem, dict):
+                output |= {os.path.join(node, val)
+                           for val in flatten_tree(elem)}
+            else:
+                output.add(os.path.join(node, elem))
+    return output
+
+
+def format_install_tree(tree):
+    return {x.format(
+        py_version=PY_MAJOR,
+        platform=get_platform(),
+        shlib_ext=get_config_var('EXT_SUFFIX') or get_config_var('SO'))
+            for x in tree}
+
+
+def _check_wheel_install(filename, install_dir, install_tree_includes,
+                         project_name, version, requires_txt):
+    w = Wheel(filename)
+    egg_path = os.path.join(install_dir, w.egg_name())
+    w.install_as_egg(egg_path)
+    if install_tree_includes is not None:
+        install_tree = format_install_tree(install_tree_includes)
+        exp = tree_set(install_dir)
+        assert install_tree.issubset(exp), (install_tree - exp)
+
+    metadata = PathMetadata(egg_path, os.path.join(egg_path, 'EGG-INFO'))
+    dist = Distribution.from_filename(egg_path, metadata=metadata)
+    assert dist.project_name == project_name
+    assert dist.version == version
+    if requires_txt is None:
+        assert not dist.has_metadata('requires.txt')
+    else:
+        assert requires_txt == dist.get_metadata('requires.txt').lstrip()
+
+
+class Record(object):
+
+    def __init__(self, id, **kwargs):
+        self._id = id
+        self._fields = kwargs
+
+    def __repr__(self):
+        return '%s(**%r)' % (self._id, self._fields)
+
+
+WHEEL_INSTALL_TESTS = (
+
+    dict(
+        id='basic',
+        file_defs={
+            'foo': {
+                '__init__.py': ''
+            }
+        },
+        setup_kwargs=dict(
+            packages=['foo'],
+        ),
+        install_tree=flatten_tree({
+            'foo-1.0-py{py_version}.egg': {
+                'EGG-INFO': [
+                    'PKG-INFO',
+                    'RECORD',
+                    'WHEEL',
+                    'top_level.txt'
+                ],
+                'foo': ['__init__.py']
+            }
+        }),
+    ),
+
+    dict(
+        id='utf-8',
+        setup_kwargs=dict(
+            description='Description accentuée',
+        )
+    ),
+
+    dict(
+        id='data',
+        file_defs={
+            'data.txt': DALS(
+                '''
+                Some data...
+                '''
+            ),
+        },
+        setup_kwargs=dict(
+            data_files=[('data_dir', ['data.txt'])],
+        ),
+        install_tree=flatten_tree({
+            'foo-1.0-py{py_version}.egg': {
+                'EGG-INFO': [
+                    'PKG-INFO',
+                    'RECORD',
+                    'WHEEL',
+                    'top_level.txt'
+                ],
+                'data_dir': [
+                    'data.txt'
+                ]
+            }
+        }),
+    ),
+
+    dict(
+        id='extension',
+        file_defs={
+            'extension.c': DALS(
+                '''
+                #include "Python.h"
+
+                #if PY_MAJOR_VERSION >= 3
+
+                static struct PyModuleDef moduledef = {
+                        PyModuleDef_HEAD_INIT,
+                        "extension",
+                        NULL,
+                        0,
+                        NULL,
+                        NULL,
+                        NULL,
+                        NULL,
+                        NULL
+                };
+
+                #define INITERROR return NULL
+
+                PyMODINIT_FUNC PyInit_extension(void)
+
+                #else
+
+                #define INITERROR return
+
+                void initextension(void)
+
+                #endif
+                {
+                #if PY_MAJOR_VERSION >= 3
+                    PyObject *module = PyModule_Create(&moduledef);
+                #else
+                    PyObject *module = Py_InitModule("extension", NULL);
+                #endif
+                    if (module == NULL)
+                        INITERROR;
+                #if PY_MAJOR_VERSION >= 3
+                    return module;
+                #endif
+                }
+                '''
+            ),
+        },
+        setup_kwargs=dict(
+            ext_modules=[
+                Record('setuptools.Extension',
+                       name='extension',
+                       sources=['extension.c'])
+            ],
+        ),
+        install_tree=flatten_tree({
+            'foo-1.0-py{py_version}-{platform}.egg': [
+                'extension{shlib_ext}',
+                {'EGG-INFO': [
+                    'PKG-INFO',
+                    'RECORD',
+                    'WHEEL',
+                    'top_level.txt',
+                ]},
+            ]
+        }),
+    ),
+
+    dict(
+        id='header',
+        file_defs={
+            'header.h': DALS(
+                '''
+                '''
+            ),
+        },
+        setup_kwargs=dict(
+            headers=['header.h'],
+        ),
+        install_tree=flatten_tree({
+            'foo-1.0-py{py_version}.egg': [
+                'header.h',
+                {'EGG-INFO': [
+                    'PKG-INFO',
+                    'RECORD',
+                    'WHEEL',
+                    'top_level.txt',
+                ]},
+            ]
+        }),
+    ),
+
+    dict(
+        id='script',
+        file_defs={
+            'script.py': DALS(
+                '''
+                #/usr/bin/python
+                print('hello world!')
+                '''
+            ),
+            'script.sh': DALS(
+                '''
+                #/bin/sh
+                echo 'hello world!'
+                '''
+            ),
+        },
+        setup_kwargs=dict(
+            scripts=['script.py', 'script.sh'],
+        ),
+        install_tree=flatten_tree({
+            'foo-1.0-py{py_version}.egg': {
+                'EGG-INFO': [
+                    'PKG-INFO',
+                    'RECORD',
+                    'WHEEL',
+                    'top_level.txt',
+                    {'scripts': [
+                        'script.py',
+                        'script.sh'
+                    ]}
+
+                ]
+            }
+        })
+    ),
+
+    dict(
+        id='requires1',
+        install_requires='foobar==2.0',
+        install_tree=flatten_tree({
+            'foo-1.0-py{py_version}.egg': {
+                'EGG-INFO': [
+                    'PKG-INFO',
+                    'RECORD',
+                    'WHEEL',
+                    'requires.txt',
+                    'top_level.txt',
+                ]
+            }
+        }),
+        requires_txt=DALS(
+            '''
+            foobar==2.0
+            '''
+        ),
+    ),
+
+    dict(
+        id='requires2',
+        install_requires='''
+        bar
+        foo<=2.0; %r in sys_platform
+        ''' % sys.platform,
+        requires_txt=DALS(
+            '''
+            bar
+            foo<=2.0
+            '''
+        ),
+    ),
+
+    dict(
+        id='requires3',
+        install_requires='''
+        bar; %r != sys_platform
+        ''' % sys.platform,
+    ),
+
+    dict(
+        id='requires4',
+        install_requires='''
+        foo
+        ''',
+        extras_require={
+            'extra': 'foobar>3',
+        },
+        requires_txt=DALS(
+            '''
+            foo
+
+            [extra]
+            foobar>3
+            '''
+        ),
+    ),
+
+    dict(
+        id='requires5',
+        extras_require={
+            'extra': 'foobar; %r != sys_platform' % sys.platform,
+        },
+        requires_txt=DALS(
+            '''
+            [extra]
+            '''
+        ),
+    ),
+
+    dict(
+        id='namespace_package',
+        file_defs={
+            'foo': {
+                'bar': {
+                    '__init__.py': ''
+                },
+            },
+        },
+        setup_kwargs=dict(
+            namespace_packages=['foo'],
+            packages=['foo.bar'],
+        ),
+        install_tree=flatten_tree({
+            'foo-1.0-py{py_version}.egg': [
+                'foo-1.0-py{py_version}-nspkg.pth',
+                {'EGG-INFO': [
+                    'PKG-INFO',
+                    'RECORD',
+                    'WHEEL',
+                    'namespace_packages.txt',
+                    'top_level.txt',
+                ]},
+                {'foo': [
+                    '__init__.py',
+                    {'bar': ['__init__.py']},
+                ]},
+            ]
+        }),
+    ),
+
+    dict(
+        id='data_in_package',
+        file_defs={
+            'foo': {
+                '__init__.py': '',
+                'data_dir': {
+                    'data.txt': DALS(
+                        '''
+                        Some data...
+                        '''
+                    ),
+                }
+            }
+        },
+        setup_kwargs=dict(
+            packages=['foo'],
+            data_files=[('foo/data_dir', ['foo/data_dir/data.txt'])],
+        ),
+        install_tree=flatten_tree({
+            'foo-1.0-py{py_version}.egg': {
+                'EGG-INFO': [
+                    'PKG-INFO',
+                    'RECORD',
+                    'WHEEL',
+                    'top_level.txt',
+                ],
+                'foo': [
+                    '__init__.py',
+                    {'data_dir': [
+                        'data.txt',
+                    ]}
+                ]
+            }
+        }),
+    ),
+
+)
+
+@pytest.mark.parametrize(
+    'params', WHEEL_INSTALL_TESTS,
+    ids=list(params['id'] for params in WHEEL_INSTALL_TESTS),
+)
+def test_wheel_install(params):
+    """Build a wheel from one WHEEL_INSTALL_TESTS case, install it as an
+    egg, and verify the installed tree and requires.txt contents."""
+    # Each test case is a dict; everything beyond 'id' is optional.
+    project_name = params.get('name', 'foo')
+    version = params.get('version', '1.0')
+    install_requires = params.get('install_requires', [])
+    extras_require = params.get('extras_require', {})
+    requires_txt = params.get('requires_txt', None)
+    install_tree = params.get('install_tree')
+    file_defs = params.get('file_defs', {})
+    setup_kwargs = params.get('setup_kwargs', {})
+    with build_wheel(
+        name=project_name,
+        version=version,
+        install_requires=install_requires,
+        extras_require=extras_require,
+        extra_file_defs=file_defs,
+        **setup_kwargs
+    ) as filename, tempdir() as install_dir:
+        _check_wheel_install(filename, install_dir,
+                             install_tree, project_name,
+                             version, requires_txt)
diff --git a/setuptools/tests/test_windows_wrappers.py b/setuptools/tests/test_windows_wrappers.py
new file mode 100644
index 0000000..d2871c0
--- /dev/null
+++ b/setuptools/tests/test_windows_wrappers.py
@@ -0,0 +1,181 @@
+"""
+Python Script Wrapper for Windows
+=================================
+
+setuptools includes wrappers for Python scripts that allow them to be
+executed like regular windows programs.  There are 2 wrappers, one
+for command-line programs, cli.exe, and one for graphical programs,
+gui.exe.  These programs are almost identical, function pretty much
+the same way, and are generated from the same source file.  The
+wrapper programs are used by copying them to the directory containing
+the script they are to wrap and with the same name as the script they
+are to wrap.
+"""
+
+from __future__ import absolute_import
+
+import sys
+import textwrap
+import subprocess
+
+import pytest
+
+from setuptools.command.easy_install import nt_quote_arg
+import pkg_resources
+
+pytestmark = pytest.mark.skipif(sys.platform != 'win32', reason="Windows only")
+
+
+class WrapperTester:
+    # Base class for the CLI/GUI wrapper tests below.  Subclasses supply
+    # script_name, script_tmpl, wrapper_name and wrapper_source.
+    @classmethod
+    def prep_script(cls, template):
+        """Fill the %(python_exe)s placeholder with the quoted interpreter."""
+        python_exe = nt_quote_arg(sys.executable)
+        return template % locals()
+
+    @classmethod
+    def create_script(cls, tmpdir):
+        """
+        Create a simple script, foo-script.py
+
+        Note that the script starts with a Unix-style '#!' line saying which
+        Python executable to run.  The wrapper will use this line to find the
+        correct Python executable.
+        """
+
+        script = cls.prep_script(cls.script_tmpl)
+
+        with (tmpdir / cls.script_name).open('w') as f:
+            f.write(script)
+
+        # also copy cli.exe to the sample directory
+        with (tmpdir / cls.wrapper_name).open('wb') as f:
+            w = pkg_resources.resource_string('setuptools', cls.wrapper_source)
+            f.write(w)
+
+
+class TestCLI(WrapperTester):
+    # Fixtures consumed by WrapperTester.create_script.
+    script_name = 'foo-script.py'
+    wrapper_source = 'cli-32.exe'
+    wrapper_name = 'foo.exe'
+    script_tmpl = textwrap.dedent("""
+        #!%(python_exe)s
+        import sys
+        input = repr(sys.stdin.read())
+        print(sys.argv[0][-14:])
+        print(sys.argv[1:])
+        print(input)
+        if __debug__:
+            print('non-optimized')
+        """).lstrip()
+
+    def test_basic(self, tmpdir):
+        """
+        When the copy of cli.exe, foo.exe in this example, runs, it examines
+        the path name it was run with and computes a Python script path name
+        by removing the '.exe' suffix and adding the '-script.py' suffix. (For
+        GUI programs, the suffix '-script.pyw' is added.)  This is why we
+        named our script the way we did.  Now we can run our script by running
+        the wrapper:
+
+        This example was a little pathological in that it exercised windows
+        (MS C runtime) quoting rules:
+
+        - Strings containing spaces are surrounded by double quotes.
+
+        - Double quotes in strings need to be escaped by preceding them with
+          back slashes.
+
+        - One or more backslashes preceding double quotes need to be escaped
+          by preceding each of them with back slashes.
+        """
+        self.create_script(tmpdir)
+        cmd = [
+            str(tmpdir / 'foo.exe'),
+            'arg1',
+            'arg 2',
+            'arg "2\\"',
+            'arg 4\\',
+            'arg5 a\\\\b',
+        ]
+        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
+        stdout, stderr = proc.communicate('hello\nworld\n'.encode('ascii'))
+        # Normalize Windows line endings before comparing.
+        actual = stdout.decode('ascii').replace('\r\n', '\n')
+        expected = textwrap.dedent(r"""
+            \foo-script.py
+            ['arg1', 'arg 2', 'arg "2\\"', 'arg 4\\', 'arg5 a\\\\b']
+            'hello\nworld\n'
+            non-optimized
+            """).lstrip()
+        assert actual == expected
+
+    def test_with_options(self, tmpdir):
+        """
+        Specifying Python Command-line Options
+        --------------------------------------
+
+        You can specify a single argument on the '#!' line.  This can be used
+        to specify Python options like -O, to run in optimized mode or -i
+        to start the interactive interpreter.  You can combine multiple
+        options as usual. For example, to run in optimized mode and
+        enter the interpreter after running the script, you could use -Oi:
+        """
+        self.create_script(tmpdir)
+        # Overwrite the default script with one whose '#!' line passes -Oi.
+        tmpl = textwrap.dedent("""
+            #!%(python_exe)s  -Oi
+            import sys
+            input = repr(sys.stdin.read())
+            print(sys.argv[0][-14:])
+            print(sys.argv[1:])
+            print(input)
+            if __debug__:
+                print('non-optimized')
+            sys.ps1 = '---'
+            """).lstrip()
+        with (tmpdir / 'foo-script.py').open('w') as f:
+            f.write(self.prep_script(tmpl))
+        cmd = [str(tmpdir / 'foo.exe')]
+        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
+        stdout, stderr = proc.communicate()
+        actual = stdout.decode('ascii').replace('\r\n', '\n')
+        expected = textwrap.dedent(r"""
+            \foo-script.py
+            []
+            ''
+            ---
+            """).lstrip()
+        assert actual == expected
+
+
+class TestGUI(WrapperTester):
+    """
+    Testing the GUI Version
+    -----------------------
+    """
+    # GUI wrappers look for a '-script.pyw' companion instead of '-script.py'.
+    script_name = 'bar-script.pyw'
+    wrapper_source = 'gui-32.exe'
+    wrapper_name = 'bar.exe'
+
+    script_tmpl = textwrap.dedent("""
+        #!%(python_exe)s
+        import sys
+        f = open(sys.argv[1], 'wb')
+        bytes_written = f.write(repr(sys.argv[2]).encode('utf-8'))
+        f.close()
+        """).strip()
+
+    def test_basic(self, tmpdir):
+        """Test the GUI version with the simple script, bar-script.py"""
+        self.create_script(tmpdir)
+
+        cmd = [
+            str(tmpdir / 'bar.exe'),
+            str(tmpdir / 'test_output.txt'),
+            'Test Argument',
+        ]
+        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
+        stdout, stderr = proc.communicate()
+        # A GUI program must not write to stdout/stderr; its output goes
+        # to the file passed as argv[1] instead.
+        assert not stdout
+        assert not stderr
+        with (tmpdir / 'test_output.txt').open('rb') as f_out:
+            actual = f_out.read().decode('ascii')
+        assert actual == repr('Test Argument')
diff --git a/setuptools/tests/text.py b/setuptools/tests/text.py
new file mode 100644
index 0000000..ad2c624
--- /dev/null
+++ b/setuptools/tests/text.py
@@ -0,0 +1,9 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import unicode_literals
+
+
+class Filenames:
+    # Shared filename fixtures: one Unicode name plus its bytes in the two
+    # encodings the tests exercise.
+    unicode = 'smörbröd.py'
+    latin_1 = unicode.encode('latin-1')
+    utf_8 = unicode.encode('utf-8')
diff --git a/setuptools/tests/textwrap.py b/setuptools/tests/textwrap.py
new file mode 100644
index 0000000..5cd9e5b
--- /dev/null
+++ b/setuptools/tests/textwrap.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+
+import textwrap
+
+
+def DALS(s):
+    "dedent and left-strip -- normalize an indented triple-quoted literal"
+    return textwrap.dedent(s).lstrip()
diff --git a/setuptools/unicode_utils.py b/setuptools/unicode_utils.py
new file mode 100644
index 0000000..7c63efd
--- /dev/null
+++ b/setuptools/unicode_utils.py
@@ -0,0 +1,44 @@
+import unicodedata
+import sys
+
+from setuptools.extern import six
+
+
+# HFS Plus uses decomposed UTF-8
+def decompose(path):
+    """Return *path* in NFD (decomposed) form; bytes paths that are not
+    valid UTF-8 are returned unchanged."""
+    if isinstance(path, six.text_type):
+        return unicodedata.normalize('NFD', path)
+    try:
+        # bytes path: round-trip through text to normalize, keep bytes type
+        path = path.decode('utf-8')
+        path = unicodedata.normalize('NFD', path)
+        path = path.encode('utf-8')
+    except UnicodeError:
+        pass  # Not UTF-8
+    return path
+
+
+def filesys_decode(path):
+    """
+    Ensure that the given path is decoded,
+    None when no expected encoding works
+    """
+
+    if isinstance(path, six.text_type):
+        return path
+
+    fs_enc = sys.getfilesystemencoding() or 'utf-8'
+    # Try the filesystem encoding first, then UTF-8 as a fallback.
+    candidates = fs_enc, 'utf-8'
+
+    for enc in candidates:
+        try:
+            return path.decode(enc)
+        except UnicodeDecodeError:
+            continue
+
+
+def try_encode(string, enc):
+    "turn unicode encoding into a functional routine: bytes on success, None on failure"
+    try:
+        return string.encode(enc)
+    except UnicodeEncodeError:
+        return None
diff --git a/setuptools/version.py b/setuptools/version.py
new file mode 100644
index 0000000..95e1869
--- /dev/null
+++ b/setuptools/version.py
@@ -0,0 +1,6 @@
+import pkg_resources
+
+# Report the installed setuptools version; fall back to 'unknown' when the
+# distribution metadata is unavailable (e.g. running from a source tree).
+try:
+    __version__ = pkg_resources.get_distribution('setuptools').version
+except Exception:
+    __version__ = 'unknown'
diff --git a/setuptools/wheel.py b/setuptools/wheel.py
new file mode 100644
index 0000000..37dfa53
--- /dev/null
+++ b/setuptools/wheel.py
@@ -0,0 +1,163 @@
+'''Wheels support.'''
+
+from distutils.util import get_platform
+import email
+import itertools
+import os
+import re
+import zipfile
+
+from pkg_resources import Distribution, PathMetadata, parse_version
+from setuptools.extern.six import PY3
+from setuptools import Distribution as SetuptoolsDistribution
+from setuptools import pep425tags
+from setuptools.command.egg_info import write_requirements
+
+
+# Matcher for wheel filenames per PEP 427:
+# {name}-{version}(-{build})?-{python tag}-{abi tag}-{platform tag}.whl
+WHEEL_NAME = re.compile(
+    r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
+    ((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
+    )\.whl$""",
+re.VERBOSE).match
+
+# Stub __init__.py written into converted namespace packages.
+NAMESPACE_PACKAGE_INIT = '''\
+try:
+    __import__('pkg_resources').declare_namespace(__name__)
+except ImportError:
+    __path__ = __import__('pkgutil').extend_path(__path__, __name__)
+'''
+
+
+def unpack(src_dir, dst_dir):
+    '''Move everything under `src_dir` to `dst_dir`, and delete the former.'''
+    for dirpath, dirnames, filenames in os.walk(src_dir):
+        subdir = os.path.relpath(dirpath, src_dir)
+        for f in filenames:
+            src = os.path.join(dirpath, f)
+            dst = os.path.join(dst_dir, subdir, f)
+            os.renames(src, dst)
+        # Iterate in reverse so deleting entries keeps earlier indices valid.
+        for n, d in reversed(list(enumerate(dirnames))):
+            src = os.path.join(dirpath, d)
+            dst = os.path.join(dst_dir, subdir, d)
+            if not os.path.exists(dst):
+                # Directory does not exist in destination,
+                # rename it and prune it from os.walk list.
+                os.renames(src, dst)
+                del dirnames[n]
+    # Cleanup.
+    for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):
+        # Only empty directories may remain; files were all moved above.
+        assert not filenames
+        os.rmdir(dirpath)
+
+
+class Wheel(object):
+    '''A PEP 427 wheel file, convertible to an egg directory.'''
+
+    def __init__(self, filename):
+        match = WHEEL_NAME(os.path.basename(filename))
+        if match is None:
+            raise ValueError('invalid wheel name: %r' % filename)
+        self.filename = filename
+        # Expose the parsed name fields (project_name, version, build,
+        # py_version, abi, platform) as instance attributes.
+        for k, v in match.groupdict().items():
+            setattr(self, k, v)
+
+    def tags(self):
+        '''List tags (py_version, abi, platform) supported by this wheel.'''
+        return itertools.product(self.py_version.split('.'),
+                                 self.abi.split('.'),
+                                 self.platform.split('.'))
+
+    def is_compatible(self):
+        '''Is the wheel compatible with the current platform?'''
+        supported_tags = pep425tags.get_supported()
+        return next((True for t in self.tags() if t in supported_tags), False)
+
+    def egg_name(self):
+        # 'any'-platform wheels yield a platform-independent egg name.
+        return Distribution(
+            project_name=self.project_name, version=self.version,
+            platform=(None if self.platform == 'any' else get_platform()),
+        ).egg_name() + '.egg'
+
+    def install_as_egg(self, destination_eggdir):
+        '''Install wheel as an egg directory.'''
+        with zipfile.ZipFile(self.filename) as zf:
+            dist_basename = '%s-%s' % (self.project_name, self.version)
+            dist_info = '%s.dist-info' % dist_basename
+            dist_data = '%s.data' % dist_basename
+            def get_metadata(name):
+                # Read and parse an RFC 822-style metadata file from dist-info.
+                with zf.open('%s/%s' % (dist_info, name)) as fp:
+                    value = fp.read().decode('utf-8') if PY3 else fp.read()
+                    return email.parser.Parser().parsestr(value)
+            wheel_metadata = get_metadata('WHEEL')
+            dist_metadata = get_metadata('METADATA')
+            # Check wheel format version is supported.
+            wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
+            if not parse_version('1.0') <= wheel_version < parse_version('2.0dev0'):
+                raise ValueError('unsupported wheel format version: %s' % wheel_version)
+            # Extract to target directory.
+            os.mkdir(destination_eggdir)
+            zf.extractall(destination_eggdir)
+            # Convert metadata.
+            dist_info = os.path.join(destination_eggdir, dist_info)
+            dist = Distribution.from_location(
+                destination_eggdir, dist_info,
+                metadata=PathMetadata(destination_eggdir, dist_info)
+            )
+            # Note: we need to evaluate and strip markers now,
+            # as we can't easily convert back from the syntax:
+            # foobar; "linux" in sys_platform and extra == 'test'
+            def raw_req(req):
+                req.marker = None
+                return str(req)
+            install_requires = list(sorted(map(raw_req, dist.requires())))
+            extras_require = {
+                extra: list(sorted(
+                    req
+                    for req in map(raw_req, dist.requires((extra,)))
+                    if req not in install_requires
+                ))
+                for extra in dist.extras
+            }
+            # Rename dist-info -> EGG-INFO and METADATA -> PKG-INFO.
+            egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
+            os.rename(dist_info, egg_info)
+            os.rename(os.path.join(egg_info, 'METADATA'),
+                      os.path.join(egg_info, 'PKG-INFO'))
+            setup_dist = SetuptoolsDistribution(attrs=dict(
+                install_requires=install_requires,
+                extras_require=extras_require,
+            ))
+            write_requirements(setup_dist.get_command_obj('egg_info'),
+                               None, os.path.join(egg_info, 'requires.txt'))
+            # Move data entries to their correct location.
+            dist_data = os.path.join(destination_eggdir, dist_data)
+            dist_data_scripts = os.path.join(dist_data, 'scripts')
+            if os.path.exists(dist_data_scripts):
+                egg_info_scripts = os.path.join(destination_eggdir,
+                                                'EGG-INFO', 'scripts')
+                os.mkdir(egg_info_scripts)
+                for entry in os.listdir(dist_data_scripts):
+                    # Remove bytecode, as it's not properly handled
+                    # during easy_install scripts install phase.
+                    if entry.endswith('.pyc'):
+                        os.unlink(os.path.join(dist_data_scripts, entry))
+                    else:
+                        os.rename(os.path.join(dist_data_scripts, entry),
+                                  os.path.join(egg_info_scripts, entry))
+                os.rmdir(dist_data_scripts)
+            for subdir in filter(os.path.exists, (
+                os.path.join(dist_data, d)
+                for d in ('data', 'headers', 'purelib', 'platlib')
+            )):
+                unpack(subdir, destination_eggdir)
+            if os.path.exists(dist_data):
+                os.rmdir(dist_data)
+            # Fix namespace packages.
+            namespace_packages = os.path.join(egg_info, 'namespace_packages.txt')
+            if os.path.exists(namespace_packages):
+                with open(namespace_packages) as fp:
+                    namespace_packages = fp.read().split()
+                for mod in namespace_packages:
+                    mod_dir = os.path.join(destination_eggdir, *mod.split('.'))
+                    mod_init = os.path.join(mod_dir, '__init__.py')
+                    if os.path.exists(mod_dir) and not os.path.exists(mod_init):
+                        with open(mod_init, 'w') as fp:
+                            fp.write(NAMESPACE_PACKAGE_INIT)
diff --git a/setuptools/windows_support.py b/setuptools/windows_support.py
new file mode 100644
index 0000000..cb977cf
--- /dev/null
+++ b/setuptools/windows_support.py
@@ -0,0 +1,29 @@
+import platform
+import ctypes
+
+
+def windows_only(func):
+    """Decorator: replace *func* with a no-op (returning None) off Windows."""
+    if platform.system() != 'Windows':
+        return lambda *args, **kwargs: None
+    return func
+
+
+@windows_only
+def hide_file(path):
+    """
+    Set the hidden attribute on a file or directory.
+
+    From http://stackoverflow.com/questions/19622133/
+
+    `path` must be text.
+
+    Raises OSError (ctypes.WinError) when the Win32 call fails.
+    """
+    # Ensure the ctypes.wintypes submodule is imported and reachable
+    # as an attribute of ctypes below.
+    __import__('ctypes.wintypes')
+    SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
+    SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
+    SetFileAttributes.restype = ctypes.wintypes.BOOL
+
+    FILE_ATTRIBUTE_HIDDEN = 0x02
+
+    ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
+    if not ret:
+        raise ctypes.WinError()
diff --git a/tests/manual_test.py b/tests/manual_test.py
new file mode 100644
index 0000000..e5aaf17
--- /dev/null
+++ b/tests/manual_test.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import shutil
+import tempfile
+import subprocess
+from distutils.command.install import INSTALL_SCHEMES
+from string import Template
+
+from six.moves import urllib
+
+
+def _system_call(*args):
+    # Run a command and fail loudly on a nonzero exit status.
+    # NOTE(review): assert is stripped under -O; acceptable in this manual
+    # test script, but not for real input validation.
+    assert subprocess.call(args) == 0
+
+
+def tempdir(func):
+    """Decorator: run *func* with cwd set to a fresh temp dir, then clean up."""
+    def _tempdir(*args, **kwargs):
+        test_dir = tempfile.mkdtemp()
+        old_dir = os.getcwd()
+        os.chdir(test_dir)
+        try:
+            return func(*args, **kwargs)
+        finally:
+            # Always restore cwd before deleting the temp dir.
+            os.chdir(old_dir)
+            shutil.rmtree(test_dir)
+
+    return _tempdir
+
+
+# Minimal buildout config consumed verbatim by test_full.
+SIMPLE_BUILDOUT = """\
+[buildout]
+
+parts = eggs
+
+[eggs]
+recipe = zc.recipe.egg
+
+eggs =
+    extensions
+"""
+
+# Bootstrap script for zc.buildout 1.x, fetched over HTTP by test_full.
+BOOTSTRAP = 'http://downloads.buildout.org/1/bootstrap.py'
+# First three characters of the version string, e.g. '2.7'.
+# NOTE(review): breaks for two-digit minors like 3.10 -- TODO confirm intent.
+PYVER = sys.version.split()[0][:3]
+
+_VARS = {'base': '.',
+         'py_version_short': PYVER}
+
+# Pick the install scheme that locates 'purelib' (site-packages) for this OS.
+scheme = 'nt' if sys.platform == 'win32' else 'unix_prefix'
+PURELIB = INSTALL_SCHEMES[scheme]['purelib']
+
+
+@tempdir
+def test_virtualenv():
+    """virtualenv with setuptools"""
+    purelib = os.path.abspath(Template(PURELIB).substitute(**_VARS))
+    _system_call('virtualenv', '--no-site-packages', '.')
+    _system_call('bin/easy_install', 'setuptools==dev')
+    # linux specific
+    site_pkg = os.listdir(purelib)
+    site_pkg.sort()
+    # The setuptools egg should sort first in site-packages.
+    assert 'setuptools' in site_pkg[0]
+    easy_install = os.path.join(purelib, 'easy-install.pth')
+    with open(easy_install) as f:
+        res = f.read()
+    assert 'setuptools' in res
+
+
+@tempdir
+def test_full():
+    """virtualenv + pip + buildout"""
+    _system_call('virtualenv', '--no-site-packages', '.')
+    _system_call('bin/easy_install', '-q', 'setuptools==dev')
+    _system_call('bin/easy_install', '-qU', 'setuptools==dev')
+    _system_call('bin/easy_install', '-q', 'pip')
+    _system_call('bin/pip', 'install', '-q', 'zc.buildout')
+
+    with open('buildout.cfg', 'w') as f:
+        f.write(SIMPLE_BUILDOUT)
+
+    with open('bootstrap.py', 'w') as f:
+        f.write(urllib.request.urlopen(BOOTSTRAP).read())
+
+    _system_call('bin/python', 'bootstrap.py')
+    _system_call('bin/buildout', '-q')
+    eggs = os.listdir('eggs')
+    eggs.sort()
+    assert len(eggs) == 3
+    # Drop the setuptools egg (version varies) before the exact comparison.
+    assert eggs[1].startswith('setuptools')
+    del eggs[1]
+    # NOTE(review): expected names hard-code py2.6 eggs -- TODO confirm.
+    assert eggs == ['extensions-0.3-py2.6.egg',
+        'zc.recipe.egg-1.2.2-py2.6.egg']
+
+
+# Allow running the manual tests directly, outside of a test runner.
+if __name__ == '__main__':
+    test_virtualenv()
+    test_full()
diff --git a/tests/test_pypi.py b/tests/test_pypi.py
new file mode 100644
index 0000000..b3425e5
--- /dev/null
+++ b/tests/test_pypi.py
@@ -0,0 +1,82 @@
+import os
+import subprocess
+
+import virtualenv
+from setuptools.extern.six.moves import http_client
+from setuptools.extern.six.moves import xmlrpc_client
+
+# Number of top PyPI packages to test against.
+TOP = 200
+PYPI_HOSTNAME = 'pypi.python.org'
+
+
+def rpc_pypi(method, *args):
+    """Call an XML-RPC method on the PyPI server and return its result.
+
+    Raises RuntimeError on any non-200 HTTP response.
+    """
+    conn = http_client.HTTPSConnection(PYPI_HOSTNAME)
+    headers = {'Content-Type': 'text/xml'}
+    payload = xmlrpc_client.dumps(args, method)
+
+    conn.request("POST", "/pypi", payload, headers)
+    response = conn.getresponse()
+    if response.status == 200:
+        # loads() returns (params, methodname); take the first param.
+        result = xmlrpc_client.loads(response.read())[0][0]
+        return result
+    else:
+        raise RuntimeError("Unable to download the list of top "
+                           "packages from Pypi.")
+
+
+def get_top_packages(limit):
+    """Collect the names of the top *limit* packages on PyPI."""
+    packages = rpc_pypi('top_packages')
+    return packages[:limit]
+
+
+def _package_install(package_name, tmp_dir=None, local_setuptools=True):
+    """Try to install a package and return the exit status.
+
+    This function creates a virtual environment, installs setuptools using pip
+    and then installs the required package. If local_setuptools is True, it
+    will install the local version of setuptools.
+    """
+    package_dir = os.path.join(tmp_dir, "test_%s" % package_name)
+    if not local_setuptools:
+        # Keep the baseline environment separate from the local-setuptools one.
+        package_dir = package_dir + "_baseline"
+
+    virtualenv.create_environment(package_dir)
+
+    pip_path = os.path.join(package_dir, "bin", "pip")
+    if local_setuptools:
+        # Install the setuptools checkout from the current directory.
+        subprocess.check_call([pip_path, "install", "."])
+    returncode = subprocess.call([pip_path, "install", package_name])
+    return returncode
+
+
+def test_package_install(package_name, tmpdir):
+    """Test to verify the outcome of installing a package.
+
+    This test compares the return code when installing a package against
+    the current stable version of setuptools.
+    """
+    new_exit_status = _package_install(package_name, tmp_dir=str(tmpdir))
+    if new_exit_status:
+        # Only fall back to the baseline when the local install failed:
+        # the package may be broken independently of setuptools.
+        print("Installation failed, testing against stable setuptools",
+              package_name)
+        old_exit_status = _package_install(package_name, tmp_dir=str(tmpdir),
+                                           local_setuptools=False)
+        assert new_exit_status == old_exit_status
+
+
+def pytest_generate_tests(metafunc):
+    """Generator function for test_package_install.
+
+    This function will generate calls to test_package_install. If a package
+    list has been specified on the command line, it will be used. Otherwise,
+    PyPI will be queried to get the current list of top packages.
+    """
+    if "package_name" in metafunc.fixturenames:
+        if not metafunc.config.option.package_name:
+            packages = get_top_packages(TOP)
+            # top_packages returns (name, download_count) pairs; keep names.
+            packages = [name for name, downloads in packages]
+        else:
+            packages = metafunc.config.option.package_name
+        metafunc.parametrize("package_name", packages)
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..a0c4cdf
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,41 @@
+# Note: Run "python bootstrap.py" before running Tox, to generate metadata.
+#
+# To run Tox against all supported Python interpreters, you can set:
+#
+# export TOXENV='py27,py3{3,4,5,6},pypy,pypy3'
+
+[tox]
+envlist=python
+
+[testenv]
+deps=-rtests/requirements.txt
+setenv=COVERAGE_FILE={toxworkdir}/.coverage.{envname}
+# TODO: The passed environment variables came from copying other tox.ini files
+# These should probably be individually annotated to explain what needs them.
+passenv=APPDATA HOMEDRIVE HOMEPATH windir APPVEYOR APPVEYOR_* CI CODECOV_* TRAVIS TRAVIS_*
+commands=pytest --cov-config={toxinidir}/tox.ini --cov-report= {posargs}
+usedevelop=True
+
+
+[testenv:coverage]
+description=Combine coverage data and create report
+deps=coverage
+skip_install=True
+changedir={toxworkdir}
+setenv=COVERAGE_FILE=.coverage
+commands=coverage erase
+         coverage combine
+         coverage {posargs:xml}
+
+[testenv:codecov]
+description=[Only run on CI]: Upload coverage data to codecov
+deps=codecov
+skip_install=True
+commands=codecov --file {toxworkdir}/coverage.xml
+
+[coverage:run]
+source=
+	pkg_resources
+	setuptools
+omit=
+	*/_vendor/*